  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/drivers/cpufreq/cpufreq.c
  4. *
  5. * Copyright (C) 2001 Russell King
  6. * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  7. * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
  8. *
  9. * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
  10. * Added handling for CPU hotplug
  11. * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
  12. * Fix handling for CPU hotplug -- affected CPUs
  13. */
  14. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15. #include <linux/cpu.h>
  16. #include <linux/cpufreq.h>
  17. #include <linux/cpu_cooling.h>
  18. #include <linux/delay.h>
  19. #include <linux/device.h>
  20. #include <linux/init.h>
  21. #include <linux/kernel_stat.h>
  22. #include <linux/module.h>
  23. #include <linux/mutex.h>
  24. #include <linux/pm_qos.h>
  25. #include <linux/slab.h>
  26. #include <linux/suspend.h>
  27. #include <linux/syscore_ops.h>
  28. #include <linux/tick.h>
  29. #include <linux/units.h>
  30. #include <trace/events/power.h>
  31. static LIST_HEAD(cpufreq_policy_list);
  32. /* Macros to iterate over CPU policies */
  33. #define for_each_suitable_policy(__policy, __active) \
  34. list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
  35. if ((__active) == !policy_is_inactive(__policy))
  36. #define for_each_active_policy(__policy) \
  37. for_each_suitable_policy(__policy, true)
  38. #define for_each_inactive_policy(__policy) \
  39. for_each_suitable_policy(__policy, false)
  40. /* Iterate over governors */
  41. static LIST_HEAD(cpufreq_governor_list);
  42. #define for_each_governor(__governor) \
  43. list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
  44. static char default_governor[CPUFREQ_NAME_LEN];
  45. /*
  46. * The "cpufreq driver" - the arch- or hardware-dependent low
  47. * level driver of CPUFreq support, and its spinlock. This lock
  48. * also protects the cpufreq_cpu_data array.
  49. */
  50. static struct cpufreq_driver *cpufreq_driver;
  51. static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
  52. static DEFINE_RWLOCK(cpufreq_driver_lock);
  53. static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
  54. bool cpufreq_supports_freq_invariance(void)
  55. {
  56. return static_branch_likely(&cpufreq_freq_invariance);
  57. }
  58. /* Flag to suspend/resume CPUFreq governors */
  59. static bool cpufreq_suspended;
  60. static inline bool has_target(void)
  61. {
  62. return cpufreq_driver->target_index || cpufreq_driver->target;
  63. }
  64. bool has_target_index(void)
  65. {
  66. return !!cpufreq_driver->target_index;
  67. }
  68. /* internal prototypes */
  69. static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
  70. static int cpufreq_init_governor(struct cpufreq_policy *policy);
  71. static void cpufreq_exit_governor(struct cpufreq_policy *policy);
  72. static void cpufreq_governor_limits(struct cpufreq_policy *policy);
  73. static int cpufreq_set_policy(struct cpufreq_policy *policy,
  74. struct cpufreq_governor *new_gov,
  75. unsigned int new_pol);
  76. static bool cpufreq_boost_supported(void);
  77. /*
  78. * Two notifier lists: the "policy" list is involved in the
  79. * validation process for a new CPU frequency policy; the
  80. * "transition" list for kernel code that needs to handle
  81. * changes to devices when the CPU clock speed changes.
  82. * The mutex locks both lists.
  83. */
  84. static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
  85. SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
  86. static int off __read_mostly;
  87. static int cpufreq_disabled(void)
  88. {
  89. return off;
  90. }
  91. void disable_cpufreq(void)
  92. {
  93. off = 1;
  94. }
  95. static DEFINE_MUTEX(cpufreq_governor_mutex);
  96. bool have_governor_per_policy(void)
  97. {
  98. return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
  99. }
  100. EXPORT_SYMBOL_GPL(have_governor_per_policy);
  101. static struct kobject *cpufreq_global_kobject;
  102. struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
  103. {
  104. if (have_governor_per_policy())
  105. return &policy->kobj;
  106. else
  107. return cpufreq_global_kobject;
  108. }
  109. EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
  110. static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
  111. {
  112. struct kernel_cpustat kcpustat;
  113. u64 cur_wall_time;
  114. u64 idle_time;
  115. u64 busy_time;
  116. cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
  117. kcpustat_cpu_fetch(&kcpustat, cpu);
  118. busy_time = kcpustat.cpustat[CPUTIME_USER];
  119. busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
  120. busy_time += kcpustat.cpustat[CPUTIME_IRQ];
  121. busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
  122. busy_time += kcpustat.cpustat[CPUTIME_STEAL];
  123. busy_time += kcpustat.cpustat[CPUTIME_NICE];
  124. idle_time = cur_wall_time - busy_time;
  125. if (wall)
  126. *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
  127. return div_u64(idle_time, NSEC_PER_USEC);
  128. }
  129. u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
  130. {
  131. u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
  132. if (idle_time == -1ULL)
  133. return get_cpu_idle_time_jiffy(cpu, wall);
  134. else if (!io_busy)
  135. idle_time += get_cpu_iowait_time_us(cpu, wall);
  136. return idle_time;
  137. }
  138. EXPORT_SYMBOL_GPL(get_cpu_idle_time);
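/*
 * Example (editorial sketch, not part of the original file): a governor-style
 * CPU load estimate computed from two get_cpu_idle_time() samples. All names
 * below are hypothetical.
 */
static unsigned int example_cpu_load_percent(unsigned int cpu, int io_busy,
					     u64 *prev_idle, u64 *prev_wall)
{
	u64 wall, idle, delta_idle, delta_wall;

	idle = get_cpu_idle_time(cpu, &wall, io_busy);
	delta_idle = idle - *prev_idle;
	delta_wall = wall - *prev_wall;
	*prev_idle = idle;
	*prev_wall = wall;

	if (!delta_wall || delta_idle > delta_wall)
		return 0;

	/* busy time as a percentage of elapsed wall time */
	return div64_u64((delta_wall - delta_idle) * 100, delta_wall);
}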
  139. /*
  140. * This is a generic cpufreq init() routine which can be used by cpufreq
  141. * drivers of SMP systems. It will do the following:
  142. * - validate and show the frequency table passed
  143. * - set the policy's transition latency
  144. * - fill policy->cpus with all possible CPUs
  145. */
  146. void cpufreq_generic_init(struct cpufreq_policy *policy,
  147. struct cpufreq_frequency_table *table,
  148. unsigned int transition_latency)
  149. {
  150. policy->freq_table = table;
  151. policy->cpuinfo.transition_latency = transition_latency;
  152. /*
  153. * The driver only supports the SMP configuration where all processors
  154. * share the clock and voltage.
  155. */
  156. cpumask_setall(policy->cpus);
  157. }
  158. EXPORT_SYMBOL_GPL(cpufreq_generic_init);
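/*
 * Example (editorial sketch, not part of the original file): a minimal driver
 * ->init() callback built on cpufreq_generic_init(). The frequency table and
 * the 300 us transition latency are made-up values.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_driver_init(struct cpufreq_policy *policy)
{
	/* All CPUs share one clock, so a single policy spans all of them. */
	cpufreq_generic_init(policy, example_freq_table, 300 * NSEC_PER_USEC);
	return 0;
}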
  159. struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
  160. {
  161. struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
  162. return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
  163. }
  164. EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
  165. unsigned int cpufreq_generic_get(unsigned int cpu)
  166. {
  167. struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
  168. if (!policy || IS_ERR(policy->clk)) {
  169. pr_err("%s: No %s associated to cpu: %d\n",
  170. __func__, policy ? "clk" : "policy", cpu);
  171. return 0;
  172. }
  173. return clk_get_rate(policy->clk) / 1000;
  174. }
  175. EXPORT_SYMBOL_GPL(cpufreq_generic_get);
  176. /**
  177. * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
  178. * @cpu: CPU to find the policy for.
  179. *
  180. * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
  181. * the kobject reference counter of that policy. Return a valid policy on
  182. * success or NULL on failure.
  183. *
  184. * The policy returned by this function has to be released with the help of
  185. * cpufreq_cpu_put() to balance its kobject reference counter properly.
  186. */
  187. struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
  188. {
  189. struct cpufreq_policy *policy = NULL;
  190. unsigned long flags;
  191. if (WARN_ON(cpu >= nr_cpu_ids))
  192. return NULL;
  193. /* get the cpufreq driver */
  194. read_lock_irqsave(&cpufreq_driver_lock, flags);
  195. if (cpufreq_driver) {
  196. /* get the CPU */
  197. policy = cpufreq_cpu_get_raw(cpu);
  198. if (policy)
  199. kobject_get(&policy->kobj);
  200. }
  201. read_unlock_irqrestore(&cpufreq_driver_lock, flags);
  202. return policy;
  203. }
  204. EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
  205. /**
  206. * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
  207. * @policy: cpufreq policy returned by cpufreq_cpu_get().
  208. */
  209. void cpufreq_cpu_put(struct cpufreq_policy *policy)
  210. {
  211. kobject_put(&policy->kobj);
  212. }
  213. EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
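/*
 * Example (editorial sketch, not part of the original file): the get/put
 * reference pattern the two helpers above are designed for.
 */
static unsigned int example_read_scaling_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int max_freq = 0;

	if (policy) {
		max_freq = policy->max;
		cpufreq_cpu_put(policy);	/* drop the kobject reference */
	}

	return max_freq;
}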
  214. /**
  215. * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
  216. * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
  217. */
  218. void cpufreq_cpu_release(struct cpufreq_policy *policy)
  219. {
  220. if (WARN_ON(!policy))
  221. return;
  222. lockdep_assert_held(&policy->rwsem);
  223. up_write(&policy->rwsem);
  224. cpufreq_cpu_put(policy);
  225. }
  226. /**
  227. * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
  228. * @cpu: CPU to find the policy for.
  229. *
  230. * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
  231. * if the policy returned by it is not NULL, acquire its rwsem for writing.
  232. * Return the policy if it is active or release it and return NULL otherwise.
  233. *
  234. * The policy returned by this function has to be released with the help of
  235. * cpufreq_cpu_release() in order to release its rwsem and balance its usage
  236. * counter properly.
  237. */
  238. struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
  239. {
  240. struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
  241. if (!policy)
  242. return NULL;
  243. down_write(&policy->rwsem);
  244. if (policy_is_inactive(policy)) {
  245. cpufreq_cpu_release(policy);
  246. return NULL;
  247. }
  248. return policy;
  249. }
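/*
 * Example (editorial sketch, not part of the original file): the matching
 * acquire/release bracket for modifying an active policy under its rwsem.
 */
static int example_with_policy_locked(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return -ENODEV;

	/* ... update the policy while holding policy->rwsem for writing ... */

	cpufreq_cpu_release(policy);
	return 0;
}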
  250. /*********************************************************************
  251. * EXTERNALLY AFFECTING FREQUENCY CHANGES *
  252. *********************************************************************/
  253. /**
  254. * adjust_jiffies - Adjust the system "loops_per_jiffy".
  255. * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
  256. * @ci: Frequency change information.
  257. *
  258. * This function alters the system "loops_per_jiffy" for the clock
  259. * speed change. Note that loops_per_jiffy cannot be updated on SMP
  260. * systems as each CPU might be scaled differently. So, use the arch
  261. * per-CPU loops_per_jiffy value wherever possible.
  262. */
  263. static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
  264. {
  265. #ifndef CONFIG_SMP
  266. static unsigned long l_p_j_ref;
  267. static unsigned int l_p_j_ref_freq;
  268. if (ci->flags & CPUFREQ_CONST_LOOPS)
  269. return;
  270. if (!l_p_j_ref_freq) {
  271. l_p_j_ref = loops_per_jiffy;
  272. l_p_j_ref_freq = ci->old;
  273. pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
  274. l_p_j_ref, l_p_j_ref_freq);
  275. }
  276. if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
  277. loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
  278. ci->new);
  279. pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
  280. loops_per_jiffy, ci->new);
  281. }
  282. #endif
  283. }
  284. /**
  285. * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
  286. * @policy: cpufreq policy the frequency transition applies to.
  287. * @freqs: contains details of the frequency update.
  288. * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
  289. *
  290. * This function calls the transition notifiers and adjust_jiffies().
  291. *
  292. * It is called twice on all CPU frequency changes that have external effects.
  293. */
  294. static void cpufreq_notify_transition(struct cpufreq_policy *policy,
  295. struct cpufreq_freqs *freqs,
  296. unsigned int state)
  297. {
  298. int cpu;
  299. BUG_ON(irqs_disabled());
  300. if (cpufreq_disabled())
  301. return;
  302. freqs->policy = policy;
  303. freqs->flags = cpufreq_driver->flags;
  304. pr_debug("notification %u of frequency transition to %u kHz\n",
  305. state, freqs->new);
  306. switch (state) {
  307. case CPUFREQ_PRECHANGE:
  308. /*
  309. * Detect if the driver reported a value as "old frequency"
  310. * which is not equal to what the cpufreq core thinks is
  311. * "old frequency".
  312. */
  313. if (policy->cur && policy->cur != freqs->old) {
  314. pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
  315. freqs->old, policy->cur);
  316. freqs->old = policy->cur;
  317. }
  318. srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
  319. CPUFREQ_PRECHANGE, freqs);
  320. adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
  321. break;
  322. case CPUFREQ_POSTCHANGE:
  323. adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
  324. pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
  325. cpumask_pr_args(policy->cpus));
  326. for_each_cpu(cpu, policy->cpus)
  327. trace_cpu_frequency(freqs->new, cpu);
  328. srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
  329. CPUFREQ_POSTCHANGE, freqs);
  330. cpufreq_stats_record_transition(policy, freqs->new);
  331. policy->cur = freqs->new;
  332. }
  333. }
  334. /* Do post notifications when there are chances that transition has failed */
  335. static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
  336. struct cpufreq_freqs *freqs, int transition_failed)
  337. {
  338. cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
  339. if (!transition_failed)
  340. return;
  341. swap(freqs->old, freqs->new);
  342. cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
  343. cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
  344. }
  345. void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
  346. struct cpufreq_freqs *freqs)
  347. {
  348. /*
  349. * Catch double invocations of _begin() which lead to self-deadlock.
  350. * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
  351. * doesn't invoke _begin() on their behalf, and hence the chances of
  352. * double invocations are very low. Moreover, there are scenarios
  353. * where these checks can emit false-positive warnings in these
  354. * drivers; so we avoid that by skipping them altogether.
  355. */
  356. WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
  357. && current == policy->transition_task);
  358. wait:
  359. wait_event(policy->transition_wait, !policy->transition_ongoing);
  360. spin_lock(&policy->transition_lock);
  361. if (unlikely(policy->transition_ongoing)) {
  362. spin_unlock(&policy->transition_lock);
  363. goto wait;
  364. }
  365. policy->transition_ongoing = true;
  366. policy->transition_task = current;
  367. spin_unlock(&policy->transition_lock);
  368. cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
  369. }
  370. EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
  371. void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
  372. struct cpufreq_freqs *freqs, int transition_failed)
  373. {
  374. if (WARN_ON(!policy->transition_ongoing))
  375. return;
  376. cpufreq_notify_post_transition(policy, freqs, transition_failed);
  377. arch_set_freq_scale(policy->related_cpus,
  378. policy->cur,
  379. arch_scale_freq_ref(policy->cpu));
  380. spin_lock(&policy->transition_lock);
  381. policy->transition_ongoing = false;
  382. policy->transition_task = NULL;
  383. spin_unlock(&policy->transition_lock);
  384. wake_up(&policy->transition_wait);
  385. }
  386. EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
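/*
 * Example (editorial sketch, not part of the original file): the begin/end
 * bracket used by drivers that issue their own transition notifications
 * (for instance old-style ->target() implementations).
 * example_write_freq_hw() is a hypothetical hardware programming helper.
 */
static int example_write_freq_hw(unsigned int target_khz)
{
	return 0;	/* hypothetical: program the hardware here */
}

static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = target_freq,
	};
	int ret;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = example_write_freq_hw(freqs.new);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	return ret;
}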
  387. /*
  388. * Fast frequency switching status count. Positive means "enabled", negative
  389. * means "disabled" and 0 means "not decided yet".
  390. */
  391. static int cpufreq_fast_switch_count;
  392. static DEFINE_MUTEX(cpufreq_fast_switch_lock);
  393. static void cpufreq_list_transition_notifiers(void)
  394. {
  395. struct notifier_block *nb;
  396. pr_info("Registered transition notifiers:\n");
  397. mutex_lock(&cpufreq_transition_notifier_list.mutex);
  398. for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
  399. pr_info("%pS\n", nb->notifier_call);
  400. mutex_unlock(&cpufreq_transition_notifier_list.mutex);
  401. }
  402. /**
  403. * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
  404. * @policy: cpufreq policy to enable fast frequency switching for.
  405. *
  406. * Try to enable fast frequency switching for @policy.
  407. *
  408. * The attempt will fail if there is at least one transition notifier registered
  409. * at this point, as fast frequency switching is quite fundamentally at odds
  410. * with transition notifiers. Thus if successful, it will make registration of
  411. * transition notifiers fail going forward.
  412. */
  413. void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
  414. {
  415. lockdep_assert_held(&policy->rwsem);
  416. if (!policy->fast_switch_possible)
  417. return;
  418. mutex_lock(&cpufreq_fast_switch_lock);
  419. if (cpufreq_fast_switch_count >= 0) {
  420. cpufreq_fast_switch_count++;
  421. policy->fast_switch_enabled = true;
  422. } else {
  423. pr_warn("CPU%u: Fast frequency switching not enabled\n",
  424. policy->cpu);
  425. cpufreq_list_transition_notifiers();
  426. }
  427. mutex_unlock(&cpufreq_fast_switch_lock);
  428. }
  429. EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
  430. /**
  431. * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
  432. * @policy: cpufreq policy to disable fast frequency switching for.
  433. */
  434. void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
  435. {
  436. mutex_lock(&cpufreq_fast_switch_lock);
  437. if (policy->fast_switch_enabled) {
  438. policy->fast_switch_enabled = false;
  439. if (!WARN_ON(cpufreq_fast_switch_count <= 0))
  440. cpufreq_fast_switch_count--;
  441. }
  442. mutex_unlock(&cpufreq_fast_switch_lock);
  443. }
  444. EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
  445. static unsigned int __resolve_freq(struct cpufreq_policy *policy,
  446. unsigned int target_freq,
  447. unsigned int min, unsigned int max,
  448. unsigned int relation)
  449. {
  450. unsigned int idx;
  451. target_freq = clamp_val(target_freq, min, max);
  452. if (!policy->freq_table)
  453. return target_freq;
  454. idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
  455. policy->cached_resolved_idx = idx;
  456. policy->cached_target_freq = target_freq;
  457. return policy->freq_table[idx].frequency;
  458. }
  459. /**
  460. * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
  461. * one.
  462. * @policy: associated policy to interrogate
  463. * @target_freq: target frequency to resolve.
  464. *
  465. * The target to driver frequency mapping is cached in the policy.
  466. *
  467. * Return: Lowest driver-supported frequency greater than or equal to the
  468. * given target_freq, subject to policy (min/max) and driver limitations.
  469. */
  470. unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
  471. unsigned int target_freq)
  472. {
  473. unsigned int min = READ_ONCE(policy->min);
  474. unsigned int max = READ_ONCE(policy->max);
  475. /*
  476. * If this function runs in parallel with cpufreq_set_policy(), it may
  477. * read policy->min before the update and policy->max after the update
  478. * or the other way around, so there is no ordering guarantee.
  479. *
  480. * Resolve this by always honoring the max (in case it comes from
  481. * thermal throttling or similar).
  482. */
  483. if (unlikely(min > max))
  484. min = max;
  485. return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
  486. }
  487. EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
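/*
 * Example (editorial sketch, not part of the original file): a fast-switching
 * governor typically resolves its raw target through the helper above before
 * asking the driver to switch.
 */
static void example_fast_switch_to(struct cpufreq_policy *policy,
				   unsigned int raw_target)
{
	unsigned int freq = cpufreq_driver_resolve_freq(policy, raw_target);

	if (policy->fast_switch_enabled)
		cpufreq_driver_fast_switch(policy, freq);
}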
  488. unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
  489. {
  490. unsigned int latency;
  491. if (policy->transition_delay_us)
  492. return policy->transition_delay_us;
  493. latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
  494. if (latency)
  495. /* Give a 50% breathing room between updates */
  496. return latency + (latency >> 1);
  497. return USEC_PER_MSEC;
  498. }
  499. EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
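/*
 * Example (editorial sketch, not part of the original file): a governor
 * deriving its minimum interval between frequency updates from the helper
 * above, in the spirit of a schedutil-style rate limit.
 */
static u64 example_rate_limit_ns(struct cpufreq_policy *policy)
{
	return (u64)cpufreq_policy_transition_delay_us(policy) * NSEC_PER_USEC;
}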
  500. /*********************************************************************
  501. * SYSFS INTERFACE *
  502. *********************************************************************/
  503. static ssize_t show_boost(struct kobject *kobj,
  504. struct kobj_attribute *attr, char *buf)
  505. {
  506. return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
  507. }
  508. static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
  509. const char *buf, size_t count)
  510. {
  511. bool enable;
  512. if (kstrtobool(buf, &enable))
  513. return -EINVAL;
  514. if (cpufreq_boost_trigger_state(enable)) {
  515. pr_err("%s: Cannot %s BOOST!\n",
  516. __func__, enable ? "enable" : "disable");
  517. return -EINVAL;
  518. }
  519. pr_debug("%s: cpufreq BOOST %s\n",
  520. __func__, enable ? "enabled" : "disabled");
  521. return count;
  522. }
  523. define_one_global_rw(boost);
  524. static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
  525. {
  526. return sysfs_emit(buf, "%d\n", policy->boost_enabled);
  527. }
  528. static ssize_t store_local_boost(struct cpufreq_policy *policy,
  529. const char *buf, size_t count)
  530. {
  531. int ret;
  532. bool enable;
  533. if (kstrtobool(buf, &enable))
  534. return -EINVAL;
  535. if (!cpufreq_driver->boost_enabled)
  536. return -EINVAL;
  537. if (policy->boost_enabled == enable)
  538. return count;
  539. policy->boost_enabled = enable;
  540. cpus_read_lock();
  541. ret = cpufreq_driver->set_boost(policy, enable);
  542. cpus_read_unlock();
  543. if (ret) {
  544. policy->boost_enabled = !policy->boost_enabled;
  545. return ret;
  546. }
  547. return count;
  548. }
  549. static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
  550. static struct cpufreq_governor *find_governor(const char *str_governor)
  551. {
  552. struct cpufreq_governor *t;
  553. for_each_governor(t)
  554. if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
  555. return t;
  556. return NULL;
  557. }
  558. static struct cpufreq_governor *get_governor(const char *str_governor)
  559. {
  560. struct cpufreq_governor *t;
  561. mutex_lock(&cpufreq_governor_mutex);
  562. t = find_governor(str_governor);
  563. if (!t)
  564. goto unlock;
  565. if (!try_module_get(t->owner))
  566. t = NULL;
  567. unlock:
  568. mutex_unlock(&cpufreq_governor_mutex);
  569. return t;
  570. }
  571. static unsigned int cpufreq_parse_policy(char *str_governor)
  572. {
  573. if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
  574. return CPUFREQ_POLICY_PERFORMANCE;
  575. if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
  576. return CPUFREQ_POLICY_POWERSAVE;
  577. return CPUFREQ_POLICY_UNKNOWN;
  578. }
  579. /**
  580. * cpufreq_parse_governor - parse a governor string only for has_target()
  581. * @str_governor: Governor name.
  582. */
  583. static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
  584. {
  585. struct cpufreq_governor *t;
  586. t = get_governor(str_governor);
  587. if (t)
  588. return t;
  589. if (request_module("cpufreq_%s", str_governor))
  590. return NULL;
  591. return get_governor(str_governor);
  592. }
  593. /*
  594. * cpufreq_per_cpu_attr_read() / show_##file_name() -
  595. * print out cpufreq information
  596. *
  597. * Write out information from cpufreq_driver->policy[cpu]; object must be
  598. * "unsigned int".
  599. */
  600. #define show_one(file_name, object) \
  601. static ssize_t show_##file_name \
  602. (struct cpufreq_policy *policy, char *buf) \
  603. { \
  604. return sysfs_emit(buf, "%u\n", policy->object); \
  605. }
  606. show_one(cpuinfo_min_freq, cpuinfo.min_freq);
  607. show_one(cpuinfo_max_freq, cpuinfo.max_freq);
  608. show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
  609. show_one(scaling_min_freq, min);
  610. show_one(scaling_max_freq, max);
  611. __weak unsigned int arch_freq_get_on_cpu(int cpu)
  612. {
  613. return 0;
  614. }
  615. static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
  616. {
  617. ssize_t ret;
  618. unsigned int freq;
  619. freq = arch_freq_get_on_cpu(policy->cpu);
  620. if (freq)
  621. ret = sysfs_emit(buf, "%u\n", freq);
  622. else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
  623. ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
  624. else
  625. ret = sysfs_emit(buf, "%u\n", policy->cur);
  626. return ret;
  627. }
  628. /*
  629. * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
  630. */
  631. #define store_one(file_name, object) \
  632. static ssize_t store_##file_name \
  633. (struct cpufreq_policy *policy, const char *buf, size_t count) \
  634. { \
  635. unsigned long val; \
  636. int ret; \
  637. \
  638. ret = kstrtoul(buf, 0, &val); \
  639. if (ret) \
  640. return ret; \
  641. \
  642. ret = freq_qos_update_request(policy->object##_freq_req, val);\
  643. return ret >= 0 ? count : ret; \
  644. }
  645. store_one(scaling_min_freq, min);
  646. store_one(scaling_max_freq, max);
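/*
 * Editorial note (not part of the original file): store_one(scaling_min_freq, min)
 * above expands to roughly the following, routing the write through the
 * policy's FREQ_QOS_MIN request rather than touching policy->min directly:
 *
 *	static ssize_t store_scaling_min_freq(struct cpufreq_policy *policy,
 *					      const char *buf, size_t count)
 *	{
 *		unsigned long val;
 *		int ret = kstrtoul(buf, 0, &val);
 *
 *		if (ret)
 *			return ret;
 *		ret = freq_qos_update_request(policy->min_freq_req, val);
 *		return ret >= 0 ? count : ret;
 *	}
 */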
  647. /*
  648. * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
  649. */
  650. static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
  651. char *buf)
  652. {
  653. unsigned int cur_freq = __cpufreq_get(policy);
  654. if (cur_freq)
  655. return sysfs_emit(buf, "%u\n", cur_freq);
  656. return sysfs_emit(buf, "<unknown>\n");
  657. }
  658. /*
  659. * show_scaling_governor - show the current policy for the specified CPU
  660. */
  661. static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
  662. {
  663. if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
  664. return sysfs_emit(buf, "powersave\n");
  665. else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
  666. return sysfs_emit(buf, "performance\n");
  667. else if (policy->governor)
  668. return sysfs_emit(buf, "%s\n", policy->governor->name);
  669. return -EINVAL;
  670. }
  671. /*
  672. * store_scaling_governor - store policy for the specified CPU
  673. */
  674. static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
  675. const char *buf, size_t count)
  676. {
  677. char str_governor[16];
  678. int ret;
  679. ret = sscanf(buf, "%15s", str_governor);
  680. if (ret != 1)
  681. return -EINVAL;
  682. if (cpufreq_driver->setpolicy) {
  683. unsigned int new_pol;
  684. new_pol = cpufreq_parse_policy(str_governor);
  685. if (!new_pol)
  686. return -EINVAL;
  687. ret = cpufreq_set_policy(policy, NULL, new_pol);
  688. } else {
  689. struct cpufreq_governor *new_gov;
  690. new_gov = cpufreq_parse_governor(str_governor);
  691. if (!new_gov)
  692. return -EINVAL;
  693. ret = cpufreq_set_policy(policy, new_gov,
  694. CPUFREQ_POLICY_UNKNOWN);
  695. module_put(new_gov->owner);
  696. }
  697. return ret ? ret : count;
  698. }
  699. /*
  700. * show_scaling_driver - show the cpufreq driver currently loaded
  701. */
  702. static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
  703. {
  704. return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
  705. }
  706. /*
  707. * show_scaling_available_governors - show the available CPUfreq governors
  708. */
  709. static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
  710. char *buf)
  711. {
  712. ssize_t i = 0;
  713. struct cpufreq_governor *t;
  714. if (!has_target()) {
  715. i += sysfs_emit(buf, "performance powersave");
  716. goto out;
  717. }
  718. mutex_lock(&cpufreq_governor_mutex);
  719. for_each_governor(t) {
  720. if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
  721. - (CPUFREQ_NAME_LEN + 2)))
  722. break;
  723. i += sysfs_emit_at(buf, i, "%s ", t->name);
  724. }
  725. mutex_unlock(&cpufreq_governor_mutex);
  726. out:
  727. i += sysfs_emit_at(buf, i, "\n");
  728. return i;
  729. }
  730. ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
  731. {
  732. ssize_t i = 0;
  733. unsigned int cpu;
  734. for_each_cpu(cpu, mask) {
  735. i += sysfs_emit_at(buf, i, "%u ", cpu);
  736. if (i >= (PAGE_SIZE - 5))
  737. break;
  738. }
  739. /* Remove the extra space at the end */
  740. i--;
  741. i += sysfs_emit_at(buf, i, "\n");
  742. return i;
  743. }
  744. EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
  745. /*
  746. * show_related_cpus - show the CPUs affected by each transition even if
  747. * hw coordination is in use
  748. */
  749. static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
  750. {
  751. return cpufreq_show_cpus(policy->related_cpus, buf);
  752. }
  753. /*
  754. * show_affected_cpus - show the CPUs affected by each transition
  755. */
  756. static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
  757. {
  758. return cpufreq_show_cpus(policy->cpus, buf);
  759. }
  760. static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
  761. const char *buf, size_t count)
  762. {
  763. unsigned int freq = 0;
  764. unsigned int ret;
  765. if (!policy->governor || !policy->governor->store_setspeed)
  766. return -EINVAL;
  767. ret = sscanf(buf, "%u", &freq);
  768. if (ret != 1)
  769. return -EINVAL;
  770. policy->governor->store_setspeed(policy, freq);
  771. return count;
  772. }
  773. static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
  774. {
  775. if (!policy->governor || !policy->governor->show_setspeed)
  776. return sysfs_emit(buf, "<unsupported>\n");
  777. return policy->governor->show_setspeed(policy, buf);
  778. }
  779. /*
  780. * show_bios_limit - show the current cpufreq HW/BIOS limitation
  781. */
  782. static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
  783. {
  784. unsigned int limit;
  785. int ret;
  786. ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
  787. if (!ret)
  788. return sysfs_emit(buf, "%u\n", limit);
  789. return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
  790. }
  791. cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
  792. cpufreq_freq_attr_ro(cpuinfo_min_freq);
  793. cpufreq_freq_attr_ro(cpuinfo_max_freq);
  794. cpufreq_freq_attr_ro(cpuinfo_transition_latency);
  795. cpufreq_freq_attr_ro(scaling_available_governors);
  796. cpufreq_freq_attr_ro(scaling_driver);
  797. cpufreq_freq_attr_ro(scaling_cur_freq);
  798. cpufreq_freq_attr_ro(bios_limit);
  799. cpufreq_freq_attr_ro(related_cpus);
  800. cpufreq_freq_attr_ro(affected_cpus);
  801. cpufreq_freq_attr_rw(scaling_min_freq);
  802. cpufreq_freq_attr_rw(scaling_max_freq);
  803. cpufreq_freq_attr_rw(scaling_governor);
  804. cpufreq_freq_attr_rw(scaling_setspeed);
  805. static struct attribute *cpufreq_attrs[] = {
  806. &cpuinfo_min_freq.attr,
  807. &cpuinfo_max_freq.attr,
  808. &cpuinfo_transition_latency.attr,
  809. &scaling_min_freq.attr,
  810. &scaling_max_freq.attr,
  811. &affected_cpus.attr,
  812. &related_cpus.attr,
  813. &scaling_governor.attr,
  814. &scaling_driver.attr,
  815. &scaling_available_governors.attr,
  816. &scaling_setspeed.attr,
  817. NULL
  818. };
  819. ATTRIBUTE_GROUPS(cpufreq);
  820. #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
  821. #define to_attr(a) container_of(a, struct freq_attr, attr)
  822. static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
  823. {
  824. struct cpufreq_policy *policy = to_policy(kobj);
  825. struct freq_attr *fattr = to_attr(attr);
  826. ssize_t ret = -EBUSY;
  827. if (!fattr->show)
  828. return -EIO;
  829. down_read(&policy->rwsem);
  830. if (likely(!policy_is_inactive(policy)))
  831. ret = fattr->show(policy, buf);
  832. up_read(&policy->rwsem);
  833. return ret;
  834. }
  835. static ssize_t store(struct kobject *kobj, struct attribute *attr,
  836. const char *buf, size_t count)
  837. {
  838. struct cpufreq_policy *policy = to_policy(kobj);
  839. struct freq_attr *fattr = to_attr(attr);
  840. ssize_t ret = -EBUSY;
  841. if (!fattr->store)
  842. return -EIO;
  843. down_write(&policy->rwsem);
  844. if (likely(!policy_is_inactive(policy)))
  845. ret = fattr->store(policy, buf, count);
  846. up_write(&policy->rwsem);
  847. return ret;
  848. }
  849. static void cpufreq_sysfs_release(struct kobject *kobj)
  850. {
  851. struct cpufreq_policy *policy = to_policy(kobj);
  852. pr_debug("last reference is dropped\n");
  853. complete(&policy->kobj_unregister);
  854. }
  855. static const struct sysfs_ops sysfs_ops = {
  856. .show = show,
  857. .store = store,
  858. };
  859. static const struct kobj_type ktype_cpufreq = {
  860. .sysfs_ops = &sysfs_ops,
  861. .default_groups = cpufreq_groups,
  862. .release = cpufreq_sysfs_release,
  863. };
  864. static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
  865. struct device *dev)
  866. {
  867. if (unlikely(!dev))
  868. return;
  869. if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
  870. return;
  871. dev_dbg(dev, "%s: Adding symlink\n", __func__);
  872. if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
  873. dev_err(dev, "cpufreq symlink creation failed\n");
  874. }
  875. static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
  876. struct device *dev)
  877. {
  878. dev_dbg(dev, "%s: Removing symlink\n", __func__);
  879. sysfs_remove_link(&dev->kobj, "cpufreq");
  880. cpumask_clear_cpu(cpu, policy->real_cpus);
  881. }
  882. static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
  883. {
  884. struct freq_attr **drv_attr;
  885. int ret = 0;
  886. /* set up files for this cpu device */
  887. drv_attr = cpufreq_driver->attr;
  888. while (drv_attr && *drv_attr) {
  889. ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
  890. if (ret)
  891. return ret;
  892. drv_attr++;
  893. }
  894. if (cpufreq_driver->get) {
  895. ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
  896. if (ret)
  897. return ret;
  898. }
  899. ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
  900. if (ret)
  901. return ret;
  902. if (cpufreq_driver->bios_limit) {
  903. ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
  904. if (ret)
  905. return ret;
  906. }
  907. if (cpufreq_boost_supported()) {
  908. ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
  909. if (ret)
  910. return ret;
  911. }
  912. return 0;
  913. }
  914. static int cpufreq_init_policy(struct cpufreq_policy *policy)
  915. {
  916. struct cpufreq_governor *gov = NULL;
  917. unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
  918. int ret;
  919. if (has_target()) {
  920. /* Update policy governor to the one used before hotplug. */
  921. gov = get_governor(policy->last_governor);
  922. if (gov) {
  923. pr_debug("Restoring governor %s for cpu %d\n",
  924. gov->name, policy->cpu);
  925. } else {
  926. gov = get_governor(default_governor);
  927. }
  928. if (!gov) {
  929. gov = cpufreq_default_governor();
  930. __module_get(gov->owner);
  931. }
  932. } else {
  933. /* Use the default policy if there is no last_policy. */
  934. if (policy->last_policy) {
  935. pol = policy->last_policy;
  936. } else {
  937. pol = cpufreq_parse_policy(default_governor);
  938. /*
  939. * In case the default governor is neither "performance"
  940. * nor "powersave", fall back to the initial policy
  941. * value set by the driver.
  942. */
  943. if (pol == CPUFREQ_POLICY_UNKNOWN)
  944. pol = policy->policy;
  945. }
  946. if (pol != CPUFREQ_POLICY_PERFORMANCE &&
  947. pol != CPUFREQ_POLICY_POWERSAVE)
  948. return -ENODATA;
  949. }
  950. ret = cpufreq_set_policy(policy, gov, pol);
  951. if (gov)
  952. module_put(gov->owner);
  953. return ret;
  954. }
  955. static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
  956. {
  957. int ret = 0;
  958. /* Has this CPU been taken care of already? */
  959. if (cpumask_test_cpu(cpu, policy->cpus))
  960. return 0;
  961. down_write(&policy->rwsem);
  962. if (has_target())
  963. cpufreq_stop_governor(policy);
  964. cpumask_set_cpu(cpu, policy->cpus);
  965. if (has_target()) {
  966. ret = cpufreq_start_governor(policy);
  967. if (ret)
  968. pr_err("%s: Failed to start governor\n", __func__);
  969. }
  970. up_write(&policy->rwsem);
  971. return ret;
  972. }
  973. void refresh_frequency_limits(struct cpufreq_policy *policy)
  974. {
  975. if (!policy_is_inactive(policy)) {
  976. pr_debug("updating policy for CPU %u\n", policy->cpu);
  977. cpufreq_set_policy(policy, policy->governor, policy->policy);
  978. }
  979. }
  980. EXPORT_SYMBOL(refresh_frequency_limits);
  981. static void handle_update(struct work_struct *work)
  982. {
  983. struct cpufreq_policy *policy =
  984. container_of(work, struct cpufreq_policy, update);
  985. pr_debug("handle_update for cpu %u called\n", policy->cpu);
  986. down_write(&policy->rwsem);
  987. refresh_frequency_limits(policy);
  988. up_write(&policy->rwsem);
  989. }
  990. static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
  991. void *data)
  992. {
  993. struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
  994. schedule_work(&policy->update);
  995. return 0;
  996. }
  997. static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
  998. void *data)
  999. {
  1000. struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
  1001. schedule_work(&policy->update);
  1002. return 0;
  1003. }
  1004. static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
  1005. {
  1006. struct kobject *kobj;
  1007. struct completion *cmp;
  1008. down_write(&policy->rwsem);
  1009. cpufreq_stats_free_table(policy);
  1010. kobj = &policy->kobj;
  1011. cmp = &policy->kobj_unregister;
  1012. up_write(&policy->rwsem);
  1013. kobject_put(kobj);
  1014. /*
  1015. * We need to make sure that the underlying kobj is
  1016. * actually not referenced anymore by anybody before we
  1017. * proceed with unloading.
  1018. */
  1019. pr_debug("waiting for dropping of refcount\n");
  1020. wait_for_completion(cmp);
  1021. pr_debug("wait complete\n");
  1022. }
  1023. static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
  1024. {
  1025. struct cpufreq_policy *policy;
  1026. struct device *dev = get_cpu_device(cpu);
  1027. int ret;
  1028. if (!dev)
  1029. return NULL;
  1030. policy = kzalloc(sizeof(*policy), GFP_KERNEL);
  1031. if (!policy)
  1032. return NULL;
  1033. if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
  1034. goto err_free_policy;
  1035. if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
  1036. goto err_free_cpumask;
  1037. if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
  1038. goto err_free_rcpumask;
  1039. init_completion(&policy->kobj_unregister);
  1040. ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
  1041. cpufreq_global_kobject, "policy%u", cpu);
  1042. if (ret) {
  1043. dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
  1044. /*
  1045. * The entire policy object will be freed below, but the extra
  1046. * memory allocated for the kobject name needs to be freed by
  1047. * releasing the kobject.
  1048. */
  1049. kobject_put(&policy->kobj);
  1050. goto err_free_real_cpus;
  1051. }
  1052. init_rwsem(&policy->rwsem);
  1053. freq_constraints_init(&policy->constraints);
  1054. policy->nb_min.notifier_call = cpufreq_notifier_min;
  1055. policy->nb_max.notifier_call = cpufreq_notifier_max;
  1056. ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
  1057. &policy->nb_min);
  1058. if (ret) {
  1059. dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
  1060. ret, cpu);
  1061. goto err_kobj_remove;
  1062. }
  1063. ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
  1064. &policy->nb_max);
  1065. if (ret) {
  1066. dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
  1067. ret, cpu);
  1068. goto err_min_qos_notifier;
  1069. }
  1070. INIT_LIST_HEAD(&policy->policy_list);
  1071. spin_lock_init(&policy->transition_lock);
  1072. init_waitqueue_head(&policy->transition_wait);
  1073. INIT_WORK(&policy->update, handle_update);
  1074. policy->cpu = cpu;
  1075. return policy;
  1076. err_min_qos_notifier:
  1077. freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
  1078. &policy->nb_min);
  1079. err_kobj_remove:
  1080. cpufreq_policy_put_kobj(policy);
  1081. err_free_real_cpus:
  1082. free_cpumask_var(policy->real_cpus);
  1083. err_free_rcpumask:
  1084. free_cpumask_var(policy->related_cpus);
  1085. err_free_cpumask:
  1086. free_cpumask_var(policy->cpus);
  1087. err_free_policy:
  1088. kfree(policy);
  1089. return NULL;
  1090. }
  1091. static void cpufreq_policy_free(struct cpufreq_policy *policy)
  1092. {
  1093. unsigned long flags;
  1094. int cpu;
  1095. /*
  1096. * The callers must ensure the policy is inactive by now, to avoid any
  1097. * races with show()/store() callbacks.
  1098. */
  1099. if (unlikely(!policy_is_inactive(policy)))
  1100. pr_warn("%s: Freeing active policy\n", __func__);
  1101. /* Remove policy from list */
  1102. write_lock_irqsave(&cpufreq_driver_lock, flags);
  1103. list_del(&policy->policy_list);
  1104. for_each_cpu(cpu, policy->related_cpus)
  1105. per_cpu(cpufreq_cpu_data, cpu) = NULL;
  1106. write_unlock_irqrestore(&cpufreq_driver_lock, flags);
  1107. freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
  1108. &policy->nb_max);
  1109. freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
  1110. &policy->nb_min);
  1111. /* Cancel any pending policy->update work before freeing the policy. */
  1112. cancel_work_sync(&policy->update);
  1113. if (policy->max_freq_req) {
  1114. /*
  1115. * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
  1116. * notification, since CPUFREQ_CREATE_POLICY notification was
  1117. * sent after adding max_freq_req earlier.
  1118. */
  1119. blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
  1120. CPUFREQ_REMOVE_POLICY, policy);
  1121. freq_qos_remove_request(policy->max_freq_req);
  1122. }
  1123. freq_qos_remove_request(policy->min_freq_req);
  1124. kfree(policy->min_freq_req);
  1125. cpufreq_policy_put_kobj(policy);
  1126. free_cpumask_var(policy->real_cpus);
  1127. free_cpumask_var(policy->related_cpus);
  1128. free_cpumask_var(policy->cpus);
  1129. kfree(policy);
  1130. }
  1131. static int cpufreq_online(unsigned int cpu)
  1132. {
  1133. struct cpufreq_policy *policy;
  1134. bool new_policy;
  1135. unsigned long flags;
  1136. unsigned int j;
  1137. int ret;
  1138. pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
  1139. /* Check if this CPU already has a policy to manage it */
  1140. policy = per_cpu(cpufreq_cpu_data, cpu);
  1141. if (policy) {
  1142. WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
  1143. if (!policy_is_inactive(policy))
  1144. return cpufreq_add_policy_cpu(policy, cpu);
  1145. /* This is the only online CPU for the policy. Start over. */
  1146. new_policy = false;
  1147. down_write(&policy->rwsem);
  1148. policy->cpu = cpu;
  1149. policy->governor = NULL;
  1150. } else {
  1151. new_policy = true;
  1152. policy = cpufreq_policy_alloc(cpu);
  1153. if (!policy)
  1154. return -ENOMEM;
  1155. down_write(&policy->rwsem);
  1156. }
  1157. if (!new_policy && cpufreq_driver->online) {
  1158. /* Recover policy->cpus using related_cpus */
  1159. cpumask_copy(policy->cpus, policy->related_cpus);
  1160. ret = cpufreq_driver->online(policy);
  1161. if (ret) {
  1162. pr_debug("%s: %d: initialization failed\n", __func__,
  1163. __LINE__);
  1164. goto out_exit_policy;
  1165. }
  1166. } else {
  1167. cpumask_copy(policy->cpus, cpumask_of(cpu));
  1168. /*
  1169. * Call driver. From then on the cpufreq must be able
  1170. * to accept all calls to ->verify and ->setpolicy for this CPU.
  1171. */
  1172. ret = cpufreq_driver->init(policy);
  1173. if (ret) {
  1174. pr_debug("%s: %d: initialization failed\n", __func__,
  1175. __LINE__);
  1176. goto out_free_policy;
  1177. }
  1178. /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
  1179. if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
  1180. policy->boost_enabled = true;
  1181. /*
  1182. * The initialization has succeeded and the policy is online.
  1183. * If there is a problem with its frequency table, take it
  1184. * offline and drop it.
  1185. */
  1186. ret = cpufreq_table_validate_and_sort(policy);
  1187. if (ret)
  1188. goto out_offline_policy;
  1189. /* related_cpus should at least include policy->cpus. */
  1190. cpumask_copy(policy->related_cpus, policy->cpus);
  1191. }
  1192. /*
  1193. * The affected CPUs must always be the ones that are online. We aren't
  1194. * managing offline CPUs here.
  1195. */
  1196. cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
  1197. if (new_policy) {
  1198. for_each_cpu(j, policy->related_cpus) {
  1199. per_cpu(cpufreq_cpu_data, j) = policy;
  1200. add_cpu_dev_symlink(policy, j, get_cpu_device(j));
  1201. }
  1202. policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
  1203. GFP_KERNEL);
  1204. if (!policy->min_freq_req) {
  1205. ret = -ENOMEM;
  1206. goto out_destroy_policy;
  1207. }
  1208. ret = freq_qos_add_request(&policy->constraints,
  1209. policy->min_freq_req, FREQ_QOS_MIN,
  1210. FREQ_QOS_MIN_DEFAULT_VALUE);
  1211. if (ret < 0) {
  1212. /*
  1213. * So we don't call freq_qos_remove_request() for an
  1214. * uninitialized request.
  1215. */
  1216. kfree(policy->min_freq_req);
  1217. policy->min_freq_req = NULL;
  1218. goto out_destroy_policy;
  1219. }
  1220. /*
  1221. * This must be initialized right here to avoid calling
  1222. * freq_qos_remove_request() on uninitialized request in case
  1223. * of errors.
  1224. */
  1225. policy->max_freq_req = policy->min_freq_req + 1;
  1226. ret = freq_qos_add_request(&policy->constraints,
  1227. policy->max_freq_req, FREQ_QOS_MAX,
  1228. FREQ_QOS_MAX_DEFAULT_VALUE);
  1229. if (ret < 0) {
  1230. policy->max_freq_req = NULL;
  1231. goto out_destroy_policy;
  1232. }
  1233. blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
  1234. CPUFREQ_CREATE_POLICY, policy);
  1235. }
  1236. if (cpufreq_driver->get && has_target()) {
  1237. policy->cur = cpufreq_driver->get(policy->cpu);
  1238. if (!policy->cur) {
  1239. ret = -EIO;
  1240. pr_err("%s: ->get() failed\n", __func__);
  1241. goto out_destroy_policy;
  1242. }
  1243. }
  1244. /*
  1245. * Sometimes boot loaders set CPU frequency to a value outside of
  1246. * frequency table present with cpufreq core. In such cases CPU might be
unstable if it has to run at that frequency for a long duration of time
and so it's better to set it to a frequency which is specified in
  1249. * freq-table. This also makes cpufreq stats inconsistent as
  1250. * cpufreq-stats would fail to register because current frequency of CPU
  1251. * isn't found in freq-table.
  1252. *
Because we don't want this change to affect the boot process badly, we go
  1254. * for the next freq which is >= policy->cur ('cur' must be set by now,
  1255. * otherwise we will end up setting freq to lowest of the table as 'cur'
  1256. * is initialized to zero).
  1257. *
  1258. * We are passing target-freq as "policy->cur - 1" otherwise
  1259. * __cpufreq_driver_target() would simply fail, as policy->cur will be
  1260. * equal to target-freq.
  1261. */
  1262. if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
  1263. && has_target()) {
  1264. unsigned int old_freq = policy->cur;
  1265. /* Are we running at unknown frequency ? */
  1266. ret = cpufreq_frequency_table_get_index(policy, old_freq);
  1267. if (ret == -EINVAL) {
  1268. ret = __cpufreq_driver_target(policy, old_freq - 1,
  1269. CPUFREQ_RELATION_L);
  1270. /*
  1271. * Reaching here after boot in a few seconds may not
  1272. * mean that system will remain stable at "unknown"
  1273. * frequency for longer duration. Hence, a BUG_ON().
  1274. */
  1275. BUG_ON(ret);
  1276. pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
  1277. __func__, policy->cpu, old_freq, policy->cur);
  1278. }
  1279. }
  1280. if (new_policy) {
  1281. ret = cpufreq_add_dev_interface(policy);
  1282. if (ret)
  1283. goto out_destroy_policy;
  1284. cpufreq_stats_create_table(policy);
  1285. write_lock_irqsave(&cpufreq_driver_lock, flags);
  1286. list_add(&policy->policy_list, &cpufreq_policy_list);
  1287. write_unlock_irqrestore(&cpufreq_driver_lock, flags);
  1288. /*
  1289. * Register with the energy model before
  1290. * sugov_eas_rebuild_sd() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model has been properly initialized for the policy.
  1294. *
  1295. * Also, this should be called before the policy is registered
  1296. * with cooling framework.
  1297. */
  1298. if (cpufreq_driver->register_em)
  1299. cpufreq_driver->register_em(policy);
  1300. }
  1301. ret = cpufreq_init_policy(policy);
  1302. if (ret) {
  1303. pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
  1304. __func__, cpu, ret);
  1305. goto out_destroy_policy;
  1306. }
  1307. up_write(&policy->rwsem);
  1308. kobject_uevent(&policy->kobj, KOBJ_ADD);
  1309. /* Callback for handling stuff after policy is ready */
  1310. if (cpufreq_driver->ready)
  1311. cpufreq_driver->ready(policy);
  1312. /* Register cpufreq cooling only for a new policy */
  1313. if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
  1314. policy->cdev = of_cpufreq_cooling_register(policy);
  1315. pr_debug("initialization complete\n");
  1316. return 0;
  1317. out_destroy_policy:
  1318. for_each_cpu(j, policy->real_cpus)
  1319. remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
  1320. out_offline_policy:
  1321. if (cpufreq_driver->offline)
  1322. cpufreq_driver->offline(policy);
  1323. out_exit_policy:
  1324. if (cpufreq_driver->exit)
  1325. cpufreq_driver->exit(policy);
  1326. out_free_policy:
  1327. cpumask_clear(policy->cpus);
  1328. up_write(&policy->rwsem);
  1329. cpufreq_policy_free(policy);
  1330. return ret;
  1331. }
  1332. /**
  1333. * cpufreq_add_dev - the cpufreq interface for a CPU device.
  1334. * @dev: CPU device.
  1335. * @sif: Subsystem interface structure pointer (not used)
  1336. */
  1337. static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
  1338. {
  1339. struct cpufreq_policy *policy;
  1340. unsigned cpu = dev->id;
  1341. int ret;
  1342. dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
  1343. if (cpu_online(cpu)) {
  1344. ret = cpufreq_online(cpu);
  1345. if (ret)
  1346. return ret;
  1347. }
  1348. /* Create sysfs link on CPU registration */
  1349. policy = per_cpu(cpufreq_cpu_data, cpu);
  1350. if (policy)
  1351. add_cpu_dev_symlink(policy, cpu, dev);
  1352. return 0;
  1353. }
  1354. static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
  1355. {
  1356. int ret;
  1357. if (has_target())
  1358. cpufreq_stop_governor(policy);
  1359. cpumask_clear_cpu(cpu, policy->cpus);
  1360. if (!policy_is_inactive(policy)) {
  1361. /* Nominate a new CPU if necessary. */
  1362. if (cpu == policy->cpu)
  1363. policy->cpu = cpumask_any(policy->cpus);
  1364. /* Start the governor again for the active policy. */
  1365. if (has_target()) {
  1366. ret = cpufreq_start_governor(policy);
  1367. if (ret)
  1368. pr_err("%s: Failed to start governor\n", __func__);
  1369. }
  1370. return;
  1371. }
  1372. if (has_target())
  1373. strscpy(policy->last_governor, policy->governor->name,
  1374. CPUFREQ_NAME_LEN);
  1375. else
  1376. policy->last_policy = policy->policy;
  1377. if (has_target())
  1378. cpufreq_exit_governor(policy);
  1379. /*
  1380. * Perform the ->offline() during light-weight tear-down, as
  1381. * that allows fast recovery when the CPU comes back.
  1382. */
  1383. if (cpufreq_driver->offline) {
  1384. cpufreq_driver->offline(policy);
  1385. return;
  1386. }
  1387. if (cpufreq_driver->exit)
  1388. cpufreq_driver->exit(policy);
  1389. policy->freq_table = NULL;
  1390. }
  1391. static int cpufreq_offline(unsigned int cpu)
  1392. {
  1393. struct cpufreq_policy *policy;
  1394. pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
  1395. policy = cpufreq_cpu_get_raw(cpu);
  1396. if (!policy) {
  1397. pr_debug("%s: No cpu_data found\n", __func__);
  1398. return 0;
  1399. }
  1400. down_write(&policy->rwsem);
  1401. __cpufreq_offline(cpu, policy);
  1402. up_write(&policy->rwsem);
  1403. return 0;
  1404. }
  1405. /*
  1406. * cpufreq_remove_dev - remove a CPU device
  1407. *
  1408. * Removes the cpufreq interface for a CPU device.
  1409. */
  1410. static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
  1411. {
  1412. unsigned int cpu = dev->id;
  1413. struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
  1414. if (!policy)
  1415. return;
  1416. down_write(&policy->rwsem);
  1417. if (cpu_online(cpu))
  1418. __cpufreq_offline(cpu, policy);
  1419. remove_cpu_dev_symlink(policy, cpu, dev);
  1420. if (!cpumask_empty(policy->real_cpus)) {
  1421. up_write(&policy->rwsem);
  1422. return;
  1423. }
  1424. /*
  1425. * Unregister cpufreq cooling once all the CPUs of the policy are
  1426. * removed.
  1427. */
  1428. if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
  1429. cpufreq_cooling_unregister(policy->cdev);
  1430. policy->cdev = NULL;
  1431. }
  1432. /* We did light-weight exit earlier, do full tear down now */
  1433. if (cpufreq_driver->offline && cpufreq_driver->exit)
  1434. cpufreq_driver->exit(policy);
  1435. up_write(&policy->rwsem);
  1436. cpufreq_policy_free(policy);
  1437. }
  1438. /**
  1439. * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
  1440. * @policy: Policy managing CPUs.
  1441. * @new_freq: New CPU frequency.
  1442. *
  1443. * Adjust to the current frequency first and clean up later by either calling
  1444. * cpufreq_update_policy(), or scheduling handle_update().
  1445. */
  1446. static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
  1447. unsigned int new_freq)
  1448. {
  1449. struct cpufreq_freqs freqs;
  1450. pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
  1451. policy->cur, new_freq);
  1452. freqs.old = policy->cur;
  1453. freqs.new = new_freq;
  1454. cpufreq_freq_transition_begin(policy, &freqs);
  1455. cpufreq_freq_transition_end(policy, &freqs, 0);
  1456. }
  1457. static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
  1458. {
  1459. unsigned int new_freq;
  1460. new_freq = cpufreq_driver->get(policy->cpu);
  1461. if (!new_freq)
  1462. return 0;
  1463. /*
  1464. * If fast frequency switching is used with the given policy, the check
  1465. * against policy->cur is pointless, so skip it in that case.
  1466. */
  1467. if (policy->fast_switch_enabled || !has_target())
  1468. return new_freq;
  1469. if (policy->cur != new_freq) {
  1470. /*
  1471. * For some platforms, the frequency returned by hardware may be
  1472. * slightly different from what is provided in the frequency
  1473. * table, for example hardware may return 499 MHz instead of 500
  1474. * MHz. In such cases it is better to avoid getting into
  1475. * unnecessary frequency updates.
  1476. */
  1477. if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
  1478. return policy->cur;
  1479. cpufreq_out_of_sync(policy, new_freq);
  1480. if (update)
  1481. schedule_work(&policy->update);
  1482. }
  1483. return new_freq;
  1484. }
  1485. /**
  1486. * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
  1487. * @cpu: CPU number
  1488. *
  1489. * This is the last known freq, without actually getting it from the driver.
* The return value will be the same as what is shown in scaling_cur_freq in sysfs.
  1491. */
  1492. unsigned int cpufreq_quick_get(unsigned int cpu)
  1493. {
  1494. struct cpufreq_policy *policy;
  1495. unsigned int ret_freq = 0;
  1496. unsigned long flags;
  1497. read_lock_irqsave(&cpufreq_driver_lock, flags);
  1498. if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
  1499. ret_freq = cpufreq_driver->get(cpu);
  1500. read_unlock_irqrestore(&cpufreq_driver_lock, flags);
  1501. return ret_freq;
  1502. }
  1503. read_unlock_irqrestore(&cpufreq_driver_lock, flags);
  1504. policy = cpufreq_cpu_get(cpu);
  1505. if (policy) {
  1506. ret_freq = policy->cur;
  1507. cpufreq_cpu_put(policy);
  1508. }
  1509. return ret_freq;
  1510. }
  1511. EXPORT_SYMBOL(cpufreq_quick_get);
  1512. /**
  1513. * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
  1514. * @cpu: CPU number
  1515. *
  1516. * Just return the max possible frequency for a given CPU.
  1517. */
  1518. unsigned int cpufreq_quick_get_max(unsigned int cpu)
  1519. {
  1520. struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
  1521. unsigned int ret_freq = 0;
  1522. if (policy) {
  1523. ret_freq = policy->max;
  1524. cpufreq_cpu_put(policy);
  1525. }
  1526. return ret_freq;
  1527. }
  1528. EXPORT_SYMBOL(cpufreq_quick_get_max);
  1529. /**
  1530. * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
  1531. * @cpu: CPU number
  1532. *
  1533. * The default return value is the max_freq field of cpuinfo.
  1534. */
  1535. __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
  1536. {
  1537. struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
  1538. unsigned int ret_freq = 0;
  1539. if (policy) {
  1540. ret_freq = policy->cpuinfo.max_freq;
  1541. cpufreq_cpu_put(policy);
  1542. }
  1543. return ret_freq;
  1544. }
  1545. EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
  1546. static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
  1547. {
  1548. if (unlikely(policy_is_inactive(policy)))
  1549. return 0;
  1550. return cpufreq_verify_current_freq(policy, true);
  1551. }
  1552. /**
  1553. * cpufreq_get - get the current CPU frequency (in kHz)
  1554. * @cpu: CPU number
  1555. *
* Get the current frequency of the CPU.
  1557. */
  1558. unsigned int cpufreq_get(unsigned int cpu)
  1559. {
  1560. struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
  1561. unsigned int ret_freq = 0;
  1562. if (policy) {
  1563. down_read(&policy->rwsem);
  1564. if (cpufreq_driver->get)
  1565. ret_freq = __cpufreq_get(policy);
  1566. up_read(&policy->rwsem);
  1567. cpufreq_cpu_put(policy);
  1568. }
  1569. return ret_freq;
  1570. }
  1571. EXPORT_SYMBOL(cpufreq_get);
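/*
 * Illustrative sketch (not part of the original file): how another subsystem
 * might read the current frequency of a CPU, falling back to the last known
 * value when the driver cannot report one. foo_report_freq() is hypothetical.
 */
static void foo_report_freq(unsigned int cpu)
{
	unsigned int khz = cpufreq_get(cpu);

	if (!khz)
		khz = cpufreq_quick_get(cpu);

	pr_info("CPU%u is running at %u kHz\n", cpu, khz);
}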
  1572. static struct subsys_interface cpufreq_interface = {
  1573. .name = "cpufreq",
  1574. .subsys = &cpu_subsys,
  1575. .add_dev = cpufreq_add_dev,
  1576. .remove_dev = cpufreq_remove_dev,
  1577. };
  1578. /*
* In case the platform wants some specific frequency to be configured
* during suspend.
  1581. */
  1582. int cpufreq_generic_suspend(struct cpufreq_policy *policy)
  1583. {
  1584. int ret;
  1585. if (!policy->suspend_freq) {
  1586. pr_debug("%s: suspend_freq not defined\n", __func__);
  1587. return 0;
  1588. }
  1589. pr_debug("%s: Setting suspend-freq: %u\n", __func__,
  1590. policy->suspend_freq);
  1591. ret = __cpufreq_driver_target(policy, policy->suspend_freq,
  1592. CPUFREQ_RELATION_H);
  1593. if (ret)
  1594. pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
  1595. __func__, policy->suspend_freq, ret);
  1596. return ret;
  1597. }
  1598. EXPORT_SYMBOL(cpufreq_generic_suspend);
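/*
 * Illustrative sketch (not part of the original file): a platform driver
 * typically opts into cpufreq_generic_suspend() by setting
 * policy->suspend_freq from its ->init() callback and pointing its ->suspend
 * hook at this helper. All foo_* names and frequencies are hypothetical, and
 * a complete driver would also provide ->verify and ->target_index (see the
 * registration sketch near cpufreq_register_driver()).
 */
static int foo_suspend_aware_init(struct cpufreq_policy *policy)
{
	/* Frequency (in kHz) to settle on right before system suspend. */
	policy->suspend_freq = 800000;
	return 0;
}

static struct cpufreq_driver foo_suspend_aware_driver = {
	.name		= "foo-cpufreq",
	.init		= foo_suspend_aware_init,
	.suspend	= cpufreq_generic_suspend,
};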
  1599. /**
  1600. * cpufreq_suspend() - Suspend CPUFreq governors.
  1601. *
  1602. * Called during system wide Suspend/Hibernate cycles for suspending governors
* as some platforms can't change frequency after this point in the suspend
* cycle, because some of the devices they use for changing the frequency
* (like i2c, regulators, etc.) are suspended quickly after this point.
  1606. */
  1607. void cpufreq_suspend(void)
  1608. {
  1609. struct cpufreq_policy *policy;
  1610. if (!cpufreq_driver)
  1611. return;
  1612. if (!has_target() && !cpufreq_driver->suspend)
  1613. goto suspend;
  1614. pr_debug("%s: Suspending Governors\n", __func__);
  1615. for_each_active_policy(policy) {
  1616. if (has_target()) {
  1617. down_write(&policy->rwsem);
  1618. cpufreq_stop_governor(policy);
  1619. up_write(&policy->rwsem);
  1620. }
  1621. if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
  1622. pr_err("%s: Failed to suspend driver: %s\n", __func__,
  1623. cpufreq_driver->name);
  1624. }
  1625. suspend:
  1626. cpufreq_suspended = true;
  1627. }
  1628. /**
  1629. * cpufreq_resume() - Resume CPUFreq governors.
  1630. *
  1631. * Called during system wide Suspend/Hibernate cycle for resuming governors that
  1632. * are suspended with cpufreq_suspend().
  1633. */
  1634. void cpufreq_resume(void)
  1635. {
  1636. struct cpufreq_policy *policy;
  1637. int ret;
  1638. if (!cpufreq_driver)
  1639. return;
  1640. if (unlikely(!cpufreq_suspended))
  1641. return;
  1642. cpufreq_suspended = false;
  1643. if (!has_target() && !cpufreq_driver->resume)
  1644. return;
  1645. pr_debug("%s: Resuming Governors\n", __func__);
  1646. for_each_active_policy(policy) {
  1647. if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
  1648. pr_err("%s: Failed to resume driver: %s\n", __func__,
  1649. cpufreq_driver->name);
  1650. } else if (has_target()) {
  1651. down_write(&policy->rwsem);
  1652. ret = cpufreq_start_governor(policy);
  1653. up_write(&policy->rwsem);
  1654. if (ret)
  1655. pr_err("%s: Failed to start governor for CPU%u's policy\n",
  1656. __func__, policy->cpu);
  1657. }
  1658. }
  1659. }
  1660. /**
  1661. * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
  1662. * @flags: Flags to test against the current cpufreq driver's flags.
  1663. *
  1664. * Assumes that the driver is there, so callers must ensure that this is the
  1665. * case.
  1666. */
  1667. bool cpufreq_driver_test_flags(u16 flags)
  1668. {
  1669. return !!(cpufreq_driver->flags & flags);
  1670. }
  1671. /**
  1672. * cpufreq_get_current_driver - Return the current driver's name.
  1673. *
  1674. * Return the name string of the currently registered cpufreq driver or NULL if
  1675. * none.
  1676. */
  1677. const char *cpufreq_get_current_driver(void)
  1678. {
  1679. if (cpufreq_driver)
  1680. return cpufreq_driver->name;
  1681. return NULL;
  1682. }
  1683. EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
  1684. /**
  1685. * cpufreq_get_driver_data - Return current driver data.
  1686. *
  1687. * Return the private data of the currently registered cpufreq driver, or NULL
  1688. * if no cpufreq driver has been registered.
  1689. */
  1690. void *cpufreq_get_driver_data(void)
  1691. {
  1692. if (cpufreq_driver)
  1693. return cpufreq_driver->driver_data;
  1694. return NULL;
  1695. }
  1696. EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
  1697. /*********************************************************************
  1698. * NOTIFIER LISTS INTERFACE *
  1699. *********************************************************************/
  1700. /**
  1701. * cpufreq_register_notifier - Register a notifier with cpufreq.
  1702. * @nb: notifier function to register.
  1703. * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
  1704. *
  1705. * Add a notifier to one of two lists: either a list of notifiers that run on
  1706. * clock rate changes (once before and once after every transition), or a list
* of notifiers that run on cpufreq policy changes.
  1708. *
  1709. * This function may sleep and it has the same return values as
  1710. * blocking_notifier_chain_register().
  1711. */
  1712. int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
  1713. {
  1714. int ret;
  1715. if (cpufreq_disabled())
  1716. return -EINVAL;
  1717. switch (list) {
  1718. case CPUFREQ_TRANSITION_NOTIFIER:
  1719. mutex_lock(&cpufreq_fast_switch_lock);
  1720. if (cpufreq_fast_switch_count > 0) {
  1721. mutex_unlock(&cpufreq_fast_switch_lock);
  1722. return -EBUSY;
  1723. }
  1724. ret = srcu_notifier_chain_register(
  1725. &cpufreq_transition_notifier_list, nb);
  1726. if (!ret)
  1727. cpufreq_fast_switch_count--;
  1728. mutex_unlock(&cpufreq_fast_switch_lock);
  1729. break;
  1730. case CPUFREQ_POLICY_NOTIFIER:
  1731. ret = blocking_notifier_chain_register(
  1732. &cpufreq_policy_notifier_list, nb);
  1733. break;
  1734. default:
  1735. ret = -EINVAL;
  1736. }
  1737. return ret;
  1738. }
  1739. EXPORT_SYMBOL(cpufreq_register_notifier);
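/*
 * Illustrative sketch (not part of the original file): registering a
 * transition notifier, which is called once before (CPUFREQ_PRECHANGE) and
 * once after (CPUFREQ_POSTCHANGE) every frequency change. The foo_* names
 * are hypothetical; a real user would call foo_notifier_register() from its
 * own init path and pair it with cpufreq_unregister_notifier() on teardown.
 */
static int foo_transition_cb(struct notifier_block *nb, unsigned long val,
			     void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("CPU%u: %u kHz -> %u kHz\n",
			 freqs->policy->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block foo_transition_nb = {
	.notifier_call = foo_transition_cb,
};

static int foo_notifier_register(void)
{
	return cpufreq_register_notifier(&foo_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}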
  1740. /**
  1741. * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
  1742. * @nb: notifier block to be unregistered.
  1743. * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
  1744. *
  1745. * Remove a notifier from one of the cpufreq notifier lists.
  1746. *
  1747. * This function may sleep and it has the same return values as
  1748. * blocking_notifier_chain_unregister().
  1749. */
  1750. int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
  1751. {
  1752. int ret;
  1753. if (cpufreq_disabled())
  1754. return -EINVAL;
  1755. switch (list) {
  1756. case CPUFREQ_TRANSITION_NOTIFIER:
  1757. mutex_lock(&cpufreq_fast_switch_lock);
  1758. ret = srcu_notifier_chain_unregister(
  1759. &cpufreq_transition_notifier_list, nb);
  1760. if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
  1761. cpufreq_fast_switch_count++;
  1762. mutex_unlock(&cpufreq_fast_switch_lock);
  1763. break;
  1764. case CPUFREQ_POLICY_NOTIFIER:
  1765. ret = blocking_notifier_chain_unregister(
  1766. &cpufreq_policy_notifier_list, nb);
  1767. break;
  1768. default:
  1769. ret = -EINVAL;
  1770. }
  1771. return ret;
  1772. }
  1773. EXPORT_SYMBOL(cpufreq_unregister_notifier);
  1774. /*********************************************************************
  1775. * GOVERNORS *
  1776. *********************************************************************/
  1777. /**
  1778. * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
  1779. * @policy: cpufreq policy to switch the frequency for.
  1780. * @target_freq: New frequency to set (may be approximate).
  1781. *
  1782. * Carry out a fast frequency switch without sleeping.
  1783. *
  1784. * The driver's ->fast_switch() callback invoked by this function must be
  1785. * suitable for being called from within RCU-sched read-side critical sections
  1786. * and it is expected to select the minimum available frequency greater than or
  1787. * equal to @target_freq (CPUFREQ_RELATION_L).
  1788. *
  1789. * This function must not be called if policy->fast_switch_enabled is unset.
  1790. *
  1791. * Governors calling this function must guarantee that it will never be invoked
  1792. * twice in parallel for the same policy and that it will never be called in
  1793. * parallel with either ->target() or ->target_index() for the same policy.
  1794. *
  1795. * Returns the actual frequency set for the CPU.
  1796. *
  1797. * If 0 is returned by the driver's ->fast_switch() callback to indicate an
  1798. * error condition, the hardware configuration must be preserved.
  1799. */
  1800. unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
  1801. unsigned int target_freq)
  1802. {
  1803. unsigned int freq;
  1804. int cpu;
  1805. target_freq = clamp_val(target_freq, policy->min, policy->max);
  1806. freq = cpufreq_driver->fast_switch(policy, target_freq);
  1807. if (!freq)
  1808. return 0;
  1809. policy->cur = freq;
  1810. arch_set_freq_scale(policy->related_cpus, freq,
  1811. arch_scale_freq_ref(policy->cpu));
  1812. cpufreq_stats_record_transition(policy, freq);
  1813. if (trace_cpu_frequency_enabled()) {
  1814. for_each_cpu(cpu, policy->cpus)
  1815. trace_cpu_frequency(freq, cpu);
  1816. }
  1817. return freq;
  1818. }
  1819. EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
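/*
 * Illustrative sketch (not part of the original file): how a governor might
 * use the fast switch path under the constraints documented above. It only
 * shows the call pattern; foo_gov_fast_adjust() is hypothetical and a real
 * governor would fall back to the regular ->target() path when fast
 * switching is not enabled for the policy.
 */
static void foo_gov_fast_adjust(struct cpufreq_policy *policy,
				unsigned int target_freq)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;

	freq = cpufreq_driver_fast_switch(policy, target_freq);
	if (!freq)
		pr_debug("fast switch failed, hardware state preserved\n");
}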
  1820. /**
  1821. * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
  1822. * @cpu: Target CPU.
  1823. * @min_perf: Minimum (required) performance level (units of @capacity).
  1824. * @target_perf: Target (desired) performance level (units of @capacity).
  1825. * @capacity: Capacity of the target CPU.
  1826. *
  1827. * Carry out a fast performance level switch of @cpu without sleeping.
  1828. *
  1829. * The driver's ->adjust_perf() callback invoked by this function must be
  1830. * suitable for being called from within RCU-sched read-side critical sections
  1831. * and it is expected to select a suitable performance level equal to or above
  1832. * @min_perf and preferably equal to or below @target_perf.
  1833. *
  1834. * This function must not be called if policy->fast_switch_enabled is unset.
  1835. *
  1836. * Governors calling this function must guarantee that it will never be invoked
  1837. * twice in parallel for the same CPU and that it will never be called in
  1838. * parallel with either ->target() or ->target_index() or ->fast_switch() for
  1839. * the same CPU.
  1840. */
  1841. void cpufreq_driver_adjust_perf(unsigned int cpu,
  1842. unsigned long min_perf,
  1843. unsigned long target_perf,
  1844. unsigned long capacity)
  1845. {
  1846. cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
  1847. }
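/*
 * Illustrative sketch (not part of the original file): the shape of a
 * driver's ->adjust_perf() callback. foo_adjust_perf() is hypothetical; note
 * that the driver registration checks later in this file also require such a
 * driver to implement ->fast_switch().
 */
static void foo_adjust_perf(unsigned int cpu, unsigned long min_perf,
			    unsigned long target_perf, unsigned long capacity)
{
	/* Honour min_perf as the floor and never exceed the CPU's capacity. */
	unsigned long perf = clamp(target_perf, min_perf, capacity);

	/*
	 * A real driver would translate 'perf' (in units of 'capacity') into
	 * a hardware performance request for 'cpu' here.
	 */
	pr_debug("CPU%u: requesting perf %lu of %lu\n", cpu, perf, capacity);
}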
  1848. /**
  1849. * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
  1850. *
  1851. * Return 'true' if the ->adjust_perf callback is present for the
  1852. * current driver or 'false' otherwise.
  1853. */
  1854. bool cpufreq_driver_has_adjust_perf(void)
  1855. {
  1856. return !!cpufreq_driver->adjust_perf;
  1857. }
  1858. /* Must set freqs->new to intermediate frequency */
  1859. static int __target_intermediate(struct cpufreq_policy *policy,
  1860. struct cpufreq_freqs *freqs, int index)
  1861. {
  1862. int ret;
  1863. freqs->new = cpufreq_driver->get_intermediate(policy, index);
  1864. /* We don't need to switch to intermediate freq */
  1865. if (!freqs->new)
  1866. return 0;
  1867. pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
  1868. __func__, policy->cpu, freqs->old, freqs->new);
  1869. cpufreq_freq_transition_begin(policy, freqs);
  1870. ret = cpufreq_driver->target_intermediate(policy, index);
  1871. cpufreq_freq_transition_end(policy, freqs, ret);
  1872. if (ret)
  1873. pr_err("%s: Failed to change to intermediate frequency: %d\n",
  1874. __func__, ret);
  1875. return ret;
  1876. }
  1877. static int __target_index(struct cpufreq_policy *policy, int index)
  1878. {
  1879. struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
  1880. unsigned int restore_freq, intermediate_freq = 0;
  1881. unsigned int newfreq = policy->freq_table[index].frequency;
  1882. int retval = -EINVAL;
  1883. bool notify;
  1884. if (newfreq == policy->cur)
  1885. return 0;
  1886. /* Save last value to restore later on errors */
  1887. restore_freq = policy->cur;
  1888. notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
  1889. if (notify) {
  1890. /* Handle switching to intermediate frequency */
  1891. if (cpufreq_driver->get_intermediate) {
  1892. retval = __target_intermediate(policy, &freqs, index);
  1893. if (retval)
  1894. return retval;
  1895. intermediate_freq = freqs.new;
  1896. /* Set old freq to intermediate */
  1897. if (intermediate_freq)
  1898. freqs.old = freqs.new;
  1899. }
  1900. freqs.new = newfreq;
  1901. pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
  1902. __func__, policy->cpu, freqs.old, freqs.new);
  1903. cpufreq_freq_transition_begin(policy, &freqs);
  1904. }
  1905. retval = cpufreq_driver->target_index(policy, index);
  1906. if (retval)
  1907. pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
  1908. retval);
  1909. if (notify) {
  1910. cpufreq_freq_transition_end(policy, &freqs, retval);
  1911. /*
  1912. * Failed after setting to intermediate freq? Driver should have
  1913. * reverted back to initial frequency and so should we. Check
  1914. * here for intermediate_freq instead of get_intermediate, in
  1915. * case we haven't switched to intermediate freq at all.
  1916. */
  1917. if (unlikely(retval && intermediate_freq)) {
  1918. freqs.old = intermediate_freq;
  1919. freqs.new = restore_freq;
  1920. cpufreq_freq_transition_begin(policy, &freqs);
  1921. cpufreq_freq_transition_end(policy, &freqs, 0);
  1922. }
  1923. }
  1924. return retval;
  1925. }
  1926. int __cpufreq_driver_target(struct cpufreq_policy *policy,
  1927. unsigned int target_freq,
  1928. unsigned int relation)
  1929. {
  1930. unsigned int old_target_freq = target_freq;
  1931. if (cpufreq_disabled())
  1932. return -ENODEV;
  1933. target_freq = __resolve_freq(policy, target_freq, policy->min,
  1934. policy->max, relation);
  1935. pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
  1936. policy->cpu, target_freq, relation, old_target_freq);
  1937. /*
* This might look like a redundant call, as we are checking it again
* after finding the index, but it is left intentionally for cases where
* exactly the same frequency is requested again, so that we can save a
* few function calls.
  1942. */
  1943. if (target_freq == policy->cur &&
  1944. !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
  1945. return 0;
  1946. if (cpufreq_driver->target) {
  1947. /*
* If the driver hasn't set up a single inefficient frequency,
  1949. * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
  1950. */
  1951. if (!policy->efficiencies_available)
  1952. relation &= ~CPUFREQ_RELATION_E;
  1953. return cpufreq_driver->target(policy, target_freq, relation);
  1954. }
  1955. if (!cpufreq_driver->target_index)
  1956. return -EINVAL;
  1957. return __target_index(policy, policy->cached_resolved_idx);
  1958. }
  1959. EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
  1960. int cpufreq_driver_target(struct cpufreq_policy *policy,
  1961. unsigned int target_freq,
  1962. unsigned int relation)
  1963. {
  1964. int ret;
  1965. down_write(&policy->rwsem);
  1966. ret = __cpufreq_driver_target(policy, target_freq, relation);
  1967. up_write(&policy->rwsem);
  1968. return ret;
  1969. }
  1970. EXPORT_SYMBOL_GPL(cpufreq_driver_target);
  1971. __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
  1972. {
  1973. return NULL;
  1974. }
  1975. static int cpufreq_init_governor(struct cpufreq_policy *policy)
  1976. {
  1977. int ret;
  1978. /* Don't start any governor operations if we are entering suspend */
  1979. if (cpufreq_suspended)
  1980. return 0;
  1981. /*
* The governor might not be initialized here if an ACPI _PPC change
* notification happened, so check it.
  1984. */
  1985. if (!policy->governor)
  1986. return -EINVAL;
  1987. /* Platform doesn't want dynamic frequency switching ? */
  1988. if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
  1989. cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
  1990. struct cpufreq_governor *gov = cpufreq_fallback_governor();
  1991. if (gov) {
  1992. pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
  1993. policy->governor->name, gov->name);
  1994. policy->governor = gov;
  1995. } else {
  1996. return -EINVAL;
  1997. }
  1998. }
  1999. if (!try_module_get(policy->governor->owner))
  2000. return -EINVAL;
  2001. pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
  2002. if (policy->governor->init) {
  2003. ret = policy->governor->init(policy);
  2004. if (ret) {
  2005. module_put(policy->governor->owner);
  2006. return ret;
  2007. }
  2008. }
  2009. policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
  2010. return 0;
  2011. }
  2012. static void cpufreq_exit_governor(struct cpufreq_policy *policy)
  2013. {
  2014. if (cpufreq_suspended || !policy->governor)
  2015. return;
  2016. pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
  2017. if (policy->governor->exit)
  2018. policy->governor->exit(policy);
  2019. module_put(policy->governor->owner);
  2020. }
  2021. int cpufreq_start_governor(struct cpufreq_policy *policy)
  2022. {
  2023. int ret;
  2024. if (cpufreq_suspended)
  2025. return 0;
  2026. if (!policy->governor)
  2027. return -EINVAL;
  2028. pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
  2029. if (cpufreq_driver->get)
  2030. cpufreq_verify_current_freq(policy, false);
  2031. if (policy->governor->start) {
  2032. ret = policy->governor->start(policy);
  2033. if (ret)
  2034. return ret;
  2035. }
  2036. if (policy->governor->limits)
  2037. policy->governor->limits(policy);
  2038. return 0;
  2039. }
  2040. void cpufreq_stop_governor(struct cpufreq_policy *policy)
  2041. {
  2042. if (cpufreq_suspended || !policy->governor)
  2043. return;
  2044. pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
  2045. if (policy->governor->stop)
  2046. policy->governor->stop(policy);
  2047. }
  2048. static void cpufreq_governor_limits(struct cpufreq_policy *policy)
  2049. {
  2050. if (cpufreq_suspended || !policy->governor)
  2051. return;
  2052. pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
  2053. if (policy->governor->limits)
  2054. policy->governor->limits(policy);
  2055. }
  2056. int cpufreq_register_governor(struct cpufreq_governor *governor)
  2057. {
  2058. int err;
  2059. if (!governor)
  2060. return -EINVAL;
  2061. if (cpufreq_disabled())
  2062. return -ENODEV;
  2063. mutex_lock(&cpufreq_governor_mutex);
  2064. err = -EBUSY;
  2065. if (!find_governor(governor->name)) {
  2066. err = 0;
  2067. list_add(&governor->governor_list, &cpufreq_governor_list);
  2068. }
  2069. mutex_unlock(&cpufreq_governor_mutex);
  2070. return err;
  2071. }
  2072. EXPORT_SYMBOL_GPL(cpufreq_register_governor);
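/*
 * Illustrative sketch (not part of the original file): the minimal shape of
 * a governor and its registration. The foo_* names are hypothetical; a real
 * governor module would call foo_governor_register()/unregister() from its
 * module_init()/module_exit() hooks.
 */
static void foo_gov_limits(struct cpufreq_policy *policy)
{
	/* Track the upper limit whenever the policy limits change. */
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor foo_governor = {
	.name	= "foo",
	.limits	= foo_gov_limits,
	.owner	= THIS_MODULE,
};

static int foo_governor_register(void)
{
	return cpufreq_register_governor(&foo_governor);
}

static void foo_governor_unregister(void)
{
	cpufreq_unregister_governor(&foo_governor);
}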
  2073. void cpufreq_unregister_governor(struct cpufreq_governor *governor)
  2074. {
  2075. struct cpufreq_policy *policy;
  2076. unsigned long flags;
  2077. if (!governor)
  2078. return;
  2079. if (cpufreq_disabled())
  2080. return;
  2081. /* clear last_governor for all inactive policies */
  2082. read_lock_irqsave(&cpufreq_driver_lock, flags);
  2083. for_each_inactive_policy(policy) {
  2084. if (!strcmp(policy->last_governor, governor->name)) {
  2085. policy->governor = NULL;
  2086. strcpy(policy->last_governor, "\0");
  2087. }
  2088. }
  2089. read_unlock_irqrestore(&cpufreq_driver_lock, flags);
  2090. mutex_lock(&cpufreq_governor_mutex);
  2091. list_del(&governor->governor_list);
  2092. mutex_unlock(&cpufreq_governor_mutex);
  2093. }
  2094. EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
  2095. /*********************************************************************
  2096. * POLICY INTERFACE *
  2097. *********************************************************************/
  2098. /**
  2099. * cpufreq_get_policy - get the current cpufreq_policy
  2100. * @policy: struct cpufreq_policy into which the current cpufreq_policy
  2101. * is written
  2102. * @cpu: CPU to find the policy for
  2103. *
  2104. * Reads the current cpufreq policy.
  2105. */
  2106. int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
  2107. {
  2108. struct cpufreq_policy *cpu_policy;
  2109. if (!policy)
  2110. return -EINVAL;
  2111. cpu_policy = cpufreq_cpu_get(cpu);
  2112. if (!cpu_policy)
  2113. return -EINVAL;
  2114. memcpy(policy, cpu_policy, sizeof(*policy));
  2115. cpufreq_cpu_put(cpu_policy);
  2116. return 0;
  2117. }
  2118. EXPORT_SYMBOL(cpufreq_get_policy);
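/*
 * Illustrative sketch (not part of the original file): taking a consistent
 * snapshot of a CPU's policy, e.g. to log its current limits.
 * foo_dump_limits() is hypothetical; note that struct cpufreq_policy is
 * fairly large, so callers may prefer a heap allocation.
 */
static void foo_dump_limits(unsigned int cpu)
{
	struct cpufreq_policy snapshot;

	if (cpufreq_get_policy(&snapshot, cpu))
		return;

	pr_info("CPU%u limits: %u - %u kHz\n", cpu, snapshot.min, snapshot.max);
}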
  2119. DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
  2120. /**
  2121. * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
  2122. * @policy: cpufreq policy of the CPUs.
  2123. *
  2124. * Update the value of cpufreq pressure for all @cpus in the policy.
  2125. */
  2126. static void cpufreq_update_pressure(struct cpufreq_policy *policy)
  2127. {
  2128. unsigned long max_capacity, capped_freq, pressure;
  2129. u32 max_freq;
  2130. int cpu;
  2131. cpu = cpumask_first(policy->related_cpus);
  2132. max_freq = arch_scale_freq_ref(cpu);
  2133. capped_freq = policy->max;
  2134. /*
* Handle boost frequencies properly: they should simply clear
* the cpufreq pressure value.
  2137. */
  2138. if (max_freq <= capped_freq) {
  2139. pressure = 0;
  2140. } else {
  2141. max_capacity = arch_scale_cpu_capacity(cpu);
  2142. pressure = max_capacity -
  2143. mult_frac(max_capacity, capped_freq, max_freq);
  2144. }
  2145. for_each_cpu(cpu, policy->related_cpus)
  2146. WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
  2147. }
  2148. /**
  2149. * cpufreq_set_policy - Modify cpufreq policy parameters.
  2150. * @policy: Policy object to modify.
  2151. * @new_gov: Policy governor pointer.
  2152. * @new_pol: Policy value (for drivers with built-in governors).
  2153. *
  2154. * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
  2155. * limits to be set for the policy, update @policy with the verified limits
  2156. * values and either invoke the driver's ->setpolicy() callback (if present) or
  2157. * carry out a governor update for @policy. That is, run the current governor's
  2158. * ->limits() callback (if @new_gov points to the same object as the one in
  2159. * @policy) or replace the governor for @policy with @new_gov.
  2160. *
  2161. * The cpuinfo part of @policy is not updated by this function.
  2162. */
  2163. static int cpufreq_set_policy(struct cpufreq_policy *policy,
  2164. struct cpufreq_governor *new_gov,
  2165. unsigned int new_pol)
  2166. {
  2167. struct cpufreq_policy_data new_data;
  2168. struct cpufreq_governor *old_gov;
  2169. int ret;
  2170. memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
  2171. new_data.freq_table = policy->freq_table;
  2172. new_data.cpu = policy->cpu;
  2173. /*
* The PM QoS framework collects all the requests from users and provides us
  2175. * the final aggregated value here.
  2176. */
  2177. new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
  2178. new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
  2179. pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
  2180. new_data.cpu, new_data.min, new_data.max);
  2181. /*
  2182. * Verify that the CPU speed can be set within these limits and make sure
  2183. * that min <= max.
  2184. */
  2185. ret = cpufreq_driver->verify(&new_data);
  2186. if (ret)
  2187. return ret;
  2188. /*
* Resolve policy min/max to available frequencies. This ensures that the
* resolved frequencies neither overshoot the requested maximum nor
* undershoot the requested minimum.
  2192. *
  2193. * Avoid storing intermediate values in policy->max or policy->min and
  2194. * compiler optimizations around them because they may be accessed
  2195. * concurrently by cpufreq_driver_resolve_freq() during the update.
  2196. */
  2197. WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
  2198. new_data.min, new_data.max,
  2199. CPUFREQ_RELATION_H));
  2200. new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
  2201. new_data.max, CPUFREQ_RELATION_L);
  2202. WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
  2203. trace_cpu_frequency_limits(policy);
  2204. cpufreq_update_pressure(policy);
  2205. policy->cached_target_freq = UINT_MAX;
  2206. pr_debug("new min and max freqs are %u - %u kHz\n",
  2207. policy->min, policy->max);
  2208. if (cpufreq_driver->setpolicy) {
  2209. policy->policy = new_pol;
  2210. pr_debug("setting range\n");
  2211. return cpufreq_driver->setpolicy(policy);
  2212. }
  2213. if (new_gov == policy->governor) {
  2214. pr_debug("governor limits update\n");
  2215. cpufreq_governor_limits(policy);
  2216. return 0;
  2217. }
  2218. pr_debug("governor switch\n");
  2219. /* save old, working values */
  2220. old_gov = policy->governor;
  2221. /* end old governor */
  2222. if (old_gov) {
  2223. cpufreq_stop_governor(policy);
  2224. cpufreq_exit_governor(policy);
  2225. }
  2226. /* start new governor */
  2227. policy->governor = new_gov;
  2228. ret = cpufreq_init_governor(policy);
  2229. if (!ret) {
  2230. ret = cpufreq_start_governor(policy);
  2231. if (!ret) {
  2232. pr_debug("governor change\n");
  2233. return 0;
  2234. }
  2235. cpufreq_exit_governor(policy);
  2236. }
  2237. /* new governor failed, so re-start old one */
  2238. pr_debug("starting governor %s failed\n", policy->governor->name);
  2239. if (old_gov) {
  2240. policy->governor = old_gov;
  2241. if (cpufreq_init_governor(policy)) {
  2242. policy->governor = NULL;
  2243. } else if (cpufreq_start_governor(policy)) {
  2244. cpufreq_exit_governor(policy);
  2245. policy->governor = NULL;
  2246. }
  2247. }
  2248. return ret;
  2249. }
  2250. /**
  2251. * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
  2252. * @cpu: CPU to re-evaluate the policy for.
  2253. *
  2254. * Update the current frequency for the cpufreq policy of @cpu and use
  2255. * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
  2256. * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
  2257. * for the policy in question, among other things.
  2258. */
  2259. void cpufreq_update_policy(unsigned int cpu)
  2260. {
  2261. struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
  2262. if (!policy)
  2263. return;
  2264. /*
  2265. * BIOS might change freq behind our back
  2266. * -> ask driver for current freq and notify governors about a change
  2267. */
  2268. if (cpufreq_driver->get && has_target() &&
  2269. (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
  2270. goto unlock;
  2271. refresh_frequency_limits(policy);
  2272. unlock:
  2273. cpufreq_cpu_release(policy);
  2274. }
  2275. EXPORT_SYMBOL(cpufreq_update_policy);
  2276. /**
  2277. * cpufreq_update_limits - Update policy limits for a given CPU.
  2278. * @cpu: CPU to update the policy limits for.
  2279. *
  2280. * Invoke the driver's ->update_limits callback if present or call
  2281. * cpufreq_update_policy() for @cpu.
  2282. */
  2283. void cpufreq_update_limits(unsigned int cpu)
  2284. {
  2285. struct cpufreq_policy *policy;
  2286. policy = cpufreq_cpu_get(cpu);
  2287. if (!policy)
  2288. return;
  2289. if (cpufreq_driver->update_limits)
  2290. cpufreq_driver->update_limits(cpu);
  2291. else
  2292. cpufreq_update_policy(cpu);
  2293. cpufreq_cpu_put(policy);
  2294. }
  2295. EXPORT_SYMBOL_GPL(cpufreq_update_limits);
  2296. /*********************************************************************
  2297. * BOOST *
  2298. *********************************************************************/
  2299. static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
  2300. {
  2301. int ret;
  2302. if (!policy->freq_table)
  2303. return -ENXIO;
  2304. ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
  2305. if (ret) {
  2306. pr_err("%s: Policy frequency update failed\n", __func__);
  2307. return ret;
  2308. }
  2309. ret = freq_qos_update_request(policy->max_freq_req, policy->max);
  2310. if (ret < 0)
  2311. return ret;
  2312. return 0;
  2313. }
  2314. int cpufreq_boost_trigger_state(int state)
  2315. {
  2316. struct cpufreq_policy *policy;
  2317. unsigned long flags;
  2318. int ret = 0;
  2319. if (cpufreq_driver->boost_enabled == state)
  2320. return 0;
  2321. write_lock_irqsave(&cpufreq_driver_lock, flags);
  2322. cpufreq_driver->boost_enabled = state;
  2323. write_unlock_irqrestore(&cpufreq_driver_lock, flags);
  2324. cpus_read_lock();
  2325. for_each_active_policy(policy) {
  2326. policy->boost_enabled = state;
  2327. ret = cpufreq_driver->set_boost(policy, state);
  2328. if (ret) {
  2329. policy->boost_enabled = !policy->boost_enabled;
  2330. goto err_reset_state;
  2331. }
  2332. }
  2333. cpus_read_unlock();
  2334. return 0;
  2335. err_reset_state:
  2336. cpus_read_unlock();
  2337. write_lock_irqsave(&cpufreq_driver_lock, flags);
  2338. cpufreq_driver->boost_enabled = !state;
  2339. write_unlock_irqrestore(&cpufreq_driver_lock, flags);
  2340. pr_err("%s: Cannot %s BOOST\n",
  2341. __func__, state ? "enable" : "disable");
  2342. return ret;
  2343. }
  2344. static bool cpufreq_boost_supported(void)
  2345. {
  2346. return cpufreq_driver->set_boost;
  2347. }
  2348. static int create_boost_sysfs_file(void)
  2349. {
  2350. int ret;
  2351. ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
  2352. if (ret)
  2353. pr_err("%s: cannot register global BOOST sysfs file\n",
  2354. __func__);
  2355. return ret;
  2356. }
  2357. static void remove_boost_sysfs_file(void)
  2358. {
  2359. if (cpufreq_boost_supported())
  2360. sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
  2361. }
  2362. int cpufreq_enable_boost_support(void)
  2363. {
  2364. if (!cpufreq_driver)
  2365. return -EINVAL;
  2366. if (cpufreq_boost_supported())
  2367. return 0;
  2368. cpufreq_driver->set_boost = cpufreq_boost_set_sw;
  2369. /* This will get removed on driver unregister */
  2370. return create_boost_sysfs_file();
  2371. }
  2372. EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
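/*
 * Illustrative sketch (not part of the original file): a driver whose
 * frequency table contains boost entries would typically enable the
 * software boost support from its ->init() callback, once the frequency
 * table has been set up. foo_init_boost() is hypothetical.
 */
static int foo_init_boost(struct cpufreq_policy *policy)
{
	/* Assumes policy->freq_table was populated earlier in ->init(). */
	if (policy_has_boost_freq(policy))
		return cpufreq_enable_boost_support();

	return 0;
}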
  2373. bool cpufreq_boost_enabled(void)
  2374. {
  2375. return cpufreq_driver->boost_enabled;
  2376. }
  2377. EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
  2378. /*********************************************************************
  2379. * REGISTER / UNREGISTER CPUFREQ DRIVER *
  2380. *********************************************************************/
  2381. static enum cpuhp_state hp_online;
  2382. static int cpuhp_cpufreq_online(unsigned int cpu)
  2383. {
  2384. cpufreq_online(cpu);
  2385. return 0;
  2386. }
  2387. static int cpuhp_cpufreq_offline(unsigned int cpu)
  2388. {
  2389. cpufreq_offline(cpu);
  2390. return 0;
  2391. }
  2392. /**
  2393. * cpufreq_register_driver - register a CPU Frequency driver
* @driver_data: A struct cpufreq_driver containing the values
  2395. * submitted by the CPU Frequency driver.
  2396. *
  2397. * Registers a CPU Frequency driver to this core code. This code
  2398. * returns zero on success, -EEXIST when another driver got here first
  2399. * (and isn't unregistered in the meantime).
  2400. *
  2401. */
  2402. int cpufreq_register_driver(struct cpufreq_driver *driver_data)
  2403. {
  2404. unsigned long flags;
  2405. int ret;
  2406. if (cpufreq_disabled())
  2407. return -ENODEV;
  2408. /*
* The cpufreq core depends heavily on the availability of device
* structures, so make sure they are available before proceeding further.
  2411. */
  2412. if (!get_cpu_device(0))
  2413. return -EPROBE_DEFER;
  2414. if (!driver_data || !driver_data->verify || !driver_data->init ||
  2415. !(driver_data->setpolicy || driver_data->target_index ||
  2416. driver_data->target) ||
  2417. (driver_data->setpolicy && (driver_data->target_index ||
  2418. driver_data->target)) ||
  2419. (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
  2420. (!driver_data->online != !driver_data->offline) ||
  2421. (driver_data->adjust_perf && !driver_data->fast_switch))
  2422. return -EINVAL;
  2423. pr_debug("trying to register driver %s\n", driver_data->name);
  2424. /* Protect against concurrent CPU online/offline. */
  2425. cpus_read_lock();
  2426. write_lock_irqsave(&cpufreq_driver_lock, flags);
  2427. if (cpufreq_driver) {
  2428. write_unlock_irqrestore(&cpufreq_driver_lock, flags);
  2429. ret = -EEXIST;
  2430. goto out;
  2431. }
  2432. cpufreq_driver = driver_data;
  2433. write_unlock_irqrestore(&cpufreq_driver_lock, flags);
  2434. if (driver_data->setpolicy)
  2435. driver_data->flags |= CPUFREQ_CONST_LOOPS;
  2436. if (cpufreq_boost_supported()) {
  2437. ret = create_boost_sysfs_file();
  2438. if (ret)
  2439. goto err_null_driver;
  2440. }
  2441. /*
  2442. * Mark support for the scheduler's frequency invariance engine for
  2443. * drivers that implement target(), target_index() or fast_switch().
  2444. */
  2445. if (!cpufreq_driver->setpolicy) {
  2446. static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
  2447. pr_debug("cpufreq: supports frequency invariance\n");
  2448. }
  2449. ret = subsys_interface_register(&cpufreq_interface);
  2450. if (ret)
  2451. goto err_boost_unreg;
  2452. if (unlikely(list_empty(&cpufreq_policy_list))) {
  2453. /* if all ->init() calls failed, unregister */
  2454. ret = -ENODEV;
  2455. pr_debug("%s: No CPU initialized for driver %s\n", __func__,
  2456. driver_data->name);
  2457. goto err_if_unreg;
  2458. }
  2459. ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
  2460. "cpufreq:online",
  2461. cpuhp_cpufreq_online,
  2462. cpuhp_cpufreq_offline);
  2463. if (ret < 0)
  2464. goto err_if_unreg;
  2465. hp_online = ret;
  2466. ret = 0;
  2467. pr_debug("driver %s up and running\n", driver_data->name);
  2468. goto out;
  2469. err_if_unreg:
  2470. subsys_interface_unregister(&cpufreq_interface);
  2471. err_boost_unreg:
  2472. if (!cpufreq_driver->setpolicy)
  2473. static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
  2474. remove_boost_sysfs_file();
  2475. err_null_driver:
  2476. write_lock_irqsave(&cpufreq_driver_lock, flags);
  2477. cpufreq_driver = NULL;
  2478. write_unlock_irqrestore(&cpufreq_driver_lock, flags);
  2479. out:
  2480. cpus_read_unlock();
  2481. return ret;
  2482. }
  2483. EXPORT_SYMBOL_GPL(cpufreq_register_driver);
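/*
 * Illustrative sketch (not part of the original file): the minimal set of
 * callbacks a frequency-table based driver must provide to pass the checks
 * above, and how it would register itself. All foo_* names and frequencies
 * are hypothetical.
 */
static struct cpufreq_frequency_table foo_table[] = {
	{ .frequency = 400000 },
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_init(struct cpufreq_policy *policy)
{
	/* All CPUs share one policy; transition latency is in nanoseconds. */
	cpufreq_generic_init(policy, foo_table, 300 * 1000);
	return 0;
}

static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* Program the hardware for foo_table[index].frequency here. */
	return 0;
}

static struct cpufreq_driver foo_driver = {
	.name		= "foo-cpufreq",
	.init		= foo_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target_index,
};

static int foo_driver_register(void)
{
	/* Typically called from the owning platform driver's probe routine. */
	return cpufreq_register_driver(&foo_driver);
}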
  2484. /*
  2485. * cpufreq_unregister_driver - unregister the current CPUFreq driver
  2486. *
  2487. * Unregister the current CPUFreq driver. Only call this if you have
  2488. * the right to do so, i.e. if you have succeeded in initialising before!
* The driver passed in must match the currently registered driver.
  2491. */
  2492. void cpufreq_unregister_driver(struct cpufreq_driver *driver)
  2493. {
  2494. unsigned long flags;
  2495. if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
  2496. return;
  2497. pr_debug("unregistering driver %s\n", driver->name);
  2498. /* Protect against concurrent cpu hotplug */
  2499. cpus_read_lock();
  2500. subsys_interface_unregister(&cpufreq_interface);
  2501. remove_boost_sysfs_file();
  2502. static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
  2503. cpuhp_remove_state_nocalls_cpuslocked(hp_online);
  2504. write_lock_irqsave(&cpufreq_driver_lock, flags);
  2505. cpufreq_driver = NULL;
  2506. write_unlock_irqrestore(&cpufreq_driver_lock, flags);
  2507. cpus_read_unlock();
  2508. }
  2509. EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
  2510. static int __init cpufreq_core_init(void)
  2511. {
  2512. struct cpufreq_governor *gov = cpufreq_default_governor();
  2513. struct device *dev_root;
  2514. if (cpufreq_disabled())
  2515. return -ENODEV;
  2516. dev_root = bus_get_dev_root(&cpu_subsys);
  2517. if (dev_root) {
  2518. cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
  2519. put_device(dev_root);
  2520. }
  2521. BUG_ON(!cpufreq_global_kobject);
  2522. if (!strlen(default_governor))
  2523. strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
  2524. return 0;
  2525. }
  2526. module_param(off, int, 0444);
  2527. module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
  2528. core_initcall(cpufreq_core_init);