intel_rapl_common.c 65 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Common code for Intel Running Average Power Limit (RAPL) support.
  4. * Copyright (c) 2019, Intel Corporation.
  5. */
  6. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7. #include <linux/bitmap.h>
  8. #include <linux/cleanup.h>
  9. #include <linux/cpu.h>
  10. #include <linux/delay.h>
  11. #include <linux/device.h>
  12. #include <linux/intel_rapl.h>
  13. #include <linux/kernel.h>
  14. #include <linux/list.h>
  15. #include <linux/log2.h>
  16. #include <linux/module.h>
  17. #include <linux/nospec.h>
  18. #include <linux/perf_event.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/powercap.h>
  21. #include <linux/processor.h>
  22. #include <linux/slab.h>
  23. #include <linux/suspend.h>
  24. #include <linux/sysfs.h>
  25. #include <linux/types.h>
  26. #include <asm/cpu_device_id.h>
  27. #include <asm/intel-family.h>
  28. #include <asm/iosf_mbi.h>
  29. /* bitmasks for RAPL MSRs, used by primitive access functions */
  30. #define ENERGY_STATUS_MASK 0xffffffff
  31. #define POWER_LIMIT1_MASK 0x7FFF
  32. #define POWER_LIMIT1_ENABLE BIT(15)
  33. #define POWER_LIMIT1_CLAMP BIT(16)
  34. #define POWER_LIMIT2_MASK (0x7FFFULL<<32)
  35. #define POWER_LIMIT2_ENABLE BIT_ULL(47)
  36. #define POWER_LIMIT2_CLAMP BIT_ULL(48)
  37. #define POWER_HIGH_LOCK BIT_ULL(63)
  38. #define POWER_LOW_LOCK BIT(31)
  39. #define POWER_LIMIT4_MASK 0x1FFF
  40. #define TIME_WINDOW1_MASK (0x7FULL<<17)
  41. #define TIME_WINDOW2_MASK (0x7FULL<<49)
  42. #define POWER_UNIT_OFFSET 0
  43. #define POWER_UNIT_MASK 0x0F
  44. #define ENERGY_UNIT_OFFSET 0x08
  45. #define ENERGY_UNIT_MASK 0x1F00
  46. #define TIME_UNIT_OFFSET 0x10
  47. #define TIME_UNIT_MASK 0xF0000
  48. #define POWER_INFO_MAX_MASK (0x7fffULL<<32)
  49. #define POWER_INFO_MIN_MASK (0x7fffULL<<16)
  50. #define POWER_INFO_MAX_TIME_WIN_MASK (0x3fULL<<48)
  51. #define POWER_INFO_THERMAL_SPEC_MASK 0x7fff
  52. #define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
  53. #define PP_POLICY_MASK 0x1F
  54. /*
  55. * SPR has different layout for Psys Domain PowerLimit registers.
  56. * There are 17 bits of PL1 and PL2 instead of 15 bits.
  57. * The Enable bits and TimeWindow bits are also shifted as a result.
  58. */
  59. #define PSYS_POWER_LIMIT1_MASK 0x1FFFF
  60. #define PSYS_POWER_LIMIT1_ENABLE BIT(17)
  61. #define PSYS_POWER_LIMIT2_MASK (0x1FFFFULL<<32)
  62. #define PSYS_POWER_LIMIT2_ENABLE BIT_ULL(49)
  63. #define PSYS_TIME_WINDOW1_MASK (0x7FULL<<19)
  64. #define PSYS_TIME_WINDOW2_MASK (0x7FULL<<51)
  65. /* bitmasks for RAPL TPMI, used by primitive access functions */
  66. #define TPMI_POWER_LIMIT_MASK 0x3FFFF
  67. #define TPMI_POWER_LIMIT_ENABLE BIT_ULL(62)
  68. #define TPMI_TIME_WINDOW_MASK (0x7FULL<<18)
  69. #define TPMI_INFO_SPEC_MASK 0x3FFFF
  70. #define TPMI_INFO_MIN_MASK (0x3FFFFULL << 18)
  71. #define TPMI_INFO_MAX_MASK (0x3FFFFULL << 36)
  72. #define TPMI_INFO_MAX_TIME_WIN_MASK (0x7FULL << 54)
  73. /* Non HW constants */
  74. #define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */
  75. #define RAPL_PRIMITIVE_DUMMY BIT(2)
  76. #define TIME_WINDOW_MAX_MSEC 40000
  77. #define TIME_WINDOW_MIN_MSEC 250
  78. #define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */
/* Unit-conversion categories applied by rapl_unit_xlate(). */
enum unit_type {
	ARBITRARY_UNIT, /* no translation */
	POWER_UNIT,	/* scaled by rd->power_unit */
	ENERGY_UNIT,	/* scaled by rd->energy_unit and ENERGY_UNIT_SCALE */
	TIME_UNIT,	/* delegated to defaults->compute_time_window() */
};
  85. /* per domain data, some are optional */
  86. #define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
  87. #define DOMAIN_STATE_INACTIVE BIT(0)
  88. #define DOMAIN_STATE_POWER_LIMIT_SET BIT(1)
/*
 * Constraint names exported for each power limit; a NULL entry marks a
 * power limit as unsupported (see is_pl_valid()).
 */
static const char *pl_names[NR_POWER_LIMITS] = {
	[POWER_LIMIT1] = "long_term",
	[POWER_LIMIT2] = "short_term",
	[POWER_LIMIT4] = "peak_power",
};
/*
 * Logical per-power-limit attributes; get_pl_prim() maps each of these to
 * the concrete RAPL primitive for a given power limit and interface.
 */
enum pl_prims {
	PL_ENABLE,
	PL_CLAMP,
	PL_LIMIT,
	PL_TIME_WINDOW,
	PL_MAX_POWER,
	PL_LOCK,
};
  102. static bool is_pl_valid(struct rapl_domain *rd, int pl)
  103. {
  104. if (pl < POWER_LIMIT1 || pl > POWER_LIMIT4)
  105. return false;
  106. return rd->rpl[pl].name ? true : false;
  107. }
  108. static int get_pl_lock_prim(struct rapl_domain *rd, int pl)
  109. {
  110. if (rd->rp->priv->type == RAPL_IF_TPMI) {
  111. if (pl == POWER_LIMIT1)
  112. return PL1_LOCK;
  113. if (pl == POWER_LIMIT2)
  114. return PL2_LOCK;
  115. if (pl == POWER_LIMIT4)
  116. return PL4_LOCK;
  117. }
  118. /* MSR/MMIO Interface doesn't have Lock bit for PL4 */
  119. if (pl == POWER_LIMIT4)
  120. return -EINVAL;
  121. /*
  122. * Power Limit register that supports two power limits has a different
  123. * bit position for the Lock bit.
  124. */
  125. if (rd->rp->priv->limits[rd->id] & BIT(POWER_LIMIT2))
  126. return FW_HIGH_LOCK;
  127. return FW_LOCK;
  128. }
  129. static int get_pl_prim(struct rapl_domain *rd, int pl, enum pl_prims prim)
  130. {
  131. switch (pl) {
  132. case POWER_LIMIT1:
  133. if (prim == PL_ENABLE)
  134. return PL1_ENABLE;
  135. if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI)
  136. return PL1_CLAMP;
  137. if (prim == PL_LIMIT)
  138. return POWER_LIMIT1;
  139. if (prim == PL_TIME_WINDOW)
  140. return TIME_WINDOW1;
  141. if (prim == PL_MAX_POWER)
  142. return THERMAL_SPEC_POWER;
  143. if (prim == PL_LOCK)
  144. return get_pl_lock_prim(rd, pl);
  145. return -EINVAL;
  146. case POWER_LIMIT2:
  147. if (prim == PL_ENABLE)
  148. return PL2_ENABLE;
  149. if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI)
  150. return PL2_CLAMP;
  151. if (prim == PL_LIMIT)
  152. return POWER_LIMIT2;
  153. if (prim == PL_TIME_WINDOW)
  154. return TIME_WINDOW2;
  155. if (prim == PL_MAX_POWER)
  156. return MAX_POWER;
  157. if (prim == PL_LOCK)
  158. return get_pl_lock_prim(rd, pl);
  159. return -EINVAL;
  160. case POWER_LIMIT4:
  161. if (prim == PL_LIMIT)
  162. return POWER_LIMIT4;
  163. if (prim == PL_ENABLE)
  164. return PL4_ENABLE;
  165. /* PL4 would be around two times PL2, use same prim as PL2. */
  166. if (prim == PL_MAX_POWER)
  167. return MAX_POWER;
  168. if (prim == PL_LOCK)
  169. return get_pl_lock_prim(rd, pl);
  170. return -EINVAL;
  171. default:
  172. return -EINVAL;
  173. }
  174. }
/* Map a powercap_zone back to its enclosing rapl_domain. */
#define power_zone_to_rapl_domain(_zone) \
	container_of(_zone, struct rapl_domain, power_zone)
/*
 * Callbacks and constants that vary per RAPL interface (MSR/MMIO vs TPMI)
 * and per CPU generation; selected in rapl_config().
 */
struct rapl_defaults {
	u8 floor_freq_reg_addr;	/* presumably an IOSF MBI register — see IOSF_CPU_POWER_BUDGET_CTL_* */
	int (*check_unit)(struct rapl_domain *rd);
	void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
	/* convert a time window value to/from its raw register encoding */
	u64 (*compute_time_window)(struct rapl_domain *rd, u64 val,
				   bool to_raw);
	unsigned int dram_domain_energy_unit;
	unsigned int psys_domain_energy_unit;
	bool spr_psys_bits;	/* use the SPR Psys layout (17-bit PLs, PSYS_* masks) */
};
  187. static struct rapl_defaults *defaults_msr;
  188. static const struct rapl_defaults defaults_tpmi;
/* Return the interface-specific defaults bound to this package by rapl_config(). */
static struct rapl_defaults *get_defaults(struct rapl_package *rp)
{
	return rp->priv->defaults;
}
  193. /* Sideband MBI registers */
  194. #define IOSF_CPU_POWER_BUDGET_CTL_BYT (0x2)
  195. #define IOSF_CPU_POWER_BUDGET_CTL_TNG (0xdf)
  196. #define PACKAGE_PLN_INT_SAVED BIT(0)
  197. #define MAX_PRIM_NAME (32)
  198. /* per domain data. used to describe individual knobs such that access function
  199. * can be consolidated into one instead of many inline functions.
  200. */
struct rapl_primitive_info {
	const char *name;		/* stringified primitive name */
	u64 mask;			/* bit-field mask within the register */
	int shift;			/* bit-field position */
	enum rapl_domain_reg_id id;	/* which per-domain register holds the field */
	enum unit_type unit;		/* conversion applied by rapl_unit_xlate() */
	u32 flag;			/* RAPL_PRIMITIVE_DERIVED / RAPL_PRIMITIVE_DUMMY */
};
/* Build one rapl_primitive_info entry; stringifies the primitive as .name. */
#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) {	\
		.name = #p,			\
		.mask = m,			\
		.shift = s,			\
		.id = i,			\
		.unit = u,			\
		.flag = f			\
	}
  217. static void rapl_init_domains(struct rapl_package *rp);
  218. static int rapl_read_data_raw(struct rapl_domain *rd,
  219. enum rapl_primitives prim,
  220. bool xlate, u64 *data);
  221. static int rapl_write_data_raw(struct rapl_domain *rd,
  222. enum rapl_primitives prim,
  223. unsigned long long value);
  224. static int rapl_read_pl_data(struct rapl_domain *rd, int pl,
  225. enum pl_prims pl_prim,
  226. bool xlate, u64 *data);
  227. static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
  228. enum pl_prims pl_prim,
  229. unsigned long long value);
  230. static u64 rapl_unit_xlate(struct rapl_domain *rd,
  231. enum unit_type type, u64 value, int to_raw);
  232. static void package_power_limit_irq_save(struct rapl_package *rp);
  233. static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
/* Zone names, indexed in the order of the RAPL domain type enumeration. */
static const char *const rapl_domain_names[] = {
	"package",
	"core",
	"uncore",
	"dram",
	"psys",
};
  241. static int get_energy_counter(struct powercap_zone *power_zone,
  242. u64 *energy_raw)
  243. {
  244. struct rapl_domain *rd;
  245. u64 energy_now;
  246. /* prevent CPU hotplug, make sure the RAPL domain does not go
  247. * away while reading the counter.
  248. */
  249. cpus_read_lock();
  250. rd = power_zone_to_rapl_domain(power_zone);
  251. if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
  252. *energy_raw = energy_now;
  253. cpus_read_unlock();
  254. return 0;
  255. }
  256. cpus_read_unlock();
  257. return -EIO;
  258. }
/* Report the counter's full range: the whole status field, unit-translated. */
static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
{
	struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);

	*energy = rapl_unit_xlate(rd, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
	return 0;
}
  265. static int release_zone(struct powercap_zone *power_zone)
  266. {
  267. struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
  268. struct rapl_package *rp = rd->rp;
  269. /* package zone is the last zone of a package, we can free
  270. * memory here since all children has been unregistered.
  271. */
  272. if (rd->id == RAPL_DOMAIN_PACKAGE) {
  273. kfree(rd);
  274. rp->domains = NULL;
  275. }
  276. return 0;
  277. }
  278. static int find_nr_power_limit(struct rapl_domain *rd)
  279. {
  280. int i, nr_pl = 0;
  281. for (i = 0; i < NR_POWER_LIMITS; i++) {
  282. if (is_pl_valid(rd, i))
  283. nr_pl++;
  284. }
  285. return nr_pl;
  286. }
/*
 * Enable/disable PL1 for the zone, verify by read-back, then apply the
 * platform floor-frequency policy if the interface provides one.
 * NOTE(review): if the hardware rejects the new state (read-back differs,
 * e.g. the limit is locked), this still returns 0 — only a debug message
 * is emitted.  Looks intentional; confirm against powercap expectations.
 */
static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
{
	struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
	struct rapl_defaults *defaults = get_defaults(rd->rp);
	u64 val;
	int ret;

	cpus_read_lock();
	ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode);
	if (ret)
		goto end;

	/* Read back to confirm the hardware accepted the change. */
	ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, false, &val);
	if (ret)
		goto end;

	if (mode != val) {
		pr_debug("%s cannot be %s\n", power_zone->name,
			 str_enabled_disabled(mode));
		goto end;
	}

	if (defaults->set_floor_freq)
		defaults->set_floor_freq(rd, mode);

end:
	cpus_read_unlock();

	return ret;
}
  311. static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
  312. {
  313. struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
  314. u64 val;
  315. int ret;
  316. if (rd->rpl[POWER_LIMIT1].locked) {
  317. *mode = false;
  318. return 0;
  319. }
  320. cpus_read_lock();
  321. ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, true, &val);
  322. if (!ret)
  323. *mode = val;
  324. cpus_read_unlock();
  325. return ret;
  326. }
  327. /* per RAPL domain ops, in the order of rapl_domain_type */
/*
 * All five domain types currently share identical callbacks; the table is
 * kept per-domain-type so individual entries can diverge later.
 */
static const struct powercap_zone_ops zone_ops[] = {
	/* RAPL_DOMAIN_PACKAGE */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
	/* RAPL_DOMAIN_PP0 */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
	/* RAPL_DOMAIN_PP1 */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
	/* RAPL_DOMAIN_DRAM */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
	/* RAPL_DOMAIN_PLATFORM */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
};
  370. /*
  371. * Constraint index used by powercap can be different than power limit (PL)
  372. * index in that some PLs maybe missing due to non-existent MSRs. So we
  373. * need to convert here by finding the valid PLs only (name populated).
  374. */
  375. static int contraint_to_pl(struct rapl_domain *rd, int cid)
  376. {
  377. int i, j;
  378. for (i = POWER_LIMIT1, j = 0; i < NR_POWER_LIMITS; i++) {
  379. if (is_pl_valid(rd, i) && j++ == cid) {
  380. pr_debug("%s: index %d\n", __func__, i);
  381. return i;
  382. }
  383. }
  384. pr_err("Cannot find matching power limit for constraint %d\n", cid);
  385. return -EINVAL;
  386. }
  387. static int set_power_limit(struct powercap_zone *power_zone, int cid,
  388. u64 power_limit)
  389. {
  390. struct rapl_domain *rd;
  391. struct rapl_package *rp;
  392. int ret = 0;
  393. int id;
  394. cpus_read_lock();
  395. rd = power_zone_to_rapl_domain(power_zone);
  396. id = contraint_to_pl(rd, cid);
  397. rp = rd->rp;
  398. ret = rapl_write_pl_data(rd, id, PL_LIMIT, power_limit);
  399. if (!ret)
  400. package_power_limit_irq_save(rp);
  401. cpus_read_unlock();
  402. return ret;
  403. }
  404. static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
  405. u64 *data)
  406. {
  407. struct rapl_domain *rd;
  408. u64 val;
  409. int ret = 0;
  410. int id;
  411. cpus_read_lock();
  412. rd = power_zone_to_rapl_domain(power_zone);
  413. id = contraint_to_pl(rd, cid);
  414. ret = rapl_read_pl_data(rd, id, PL_LIMIT, true, &val);
  415. if (!ret)
  416. *data = val;
  417. cpus_read_unlock();
  418. return ret;
  419. }
  420. static int set_time_window(struct powercap_zone *power_zone, int cid,
  421. u64 window)
  422. {
  423. struct rapl_domain *rd;
  424. int ret = 0;
  425. int id;
  426. cpus_read_lock();
  427. rd = power_zone_to_rapl_domain(power_zone);
  428. id = contraint_to_pl(rd, cid);
  429. ret = rapl_write_pl_data(rd, id, PL_TIME_WINDOW, window);
  430. cpus_read_unlock();
  431. return ret;
  432. }
  433. static int get_time_window(struct powercap_zone *power_zone, int cid,
  434. u64 *data)
  435. {
  436. struct rapl_domain *rd;
  437. u64 val;
  438. int ret = 0;
  439. int id;
  440. cpus_read_lock();
  441. rd = power_zone_to_rapl_domain(power_zone);
  442. id = contraint_to_pl(rd, cid);
  443. ret = rapl_read_pl_data(rd, id, PL_TIME_WINDOW, true, &val);
  444. if (!ret)
  445. *data = val;
  446. cpus_read_unlock();
  447. return ret;
  448. }
  449. static const char *get_constraint_name(struct powercap_zone *power_zone,
  450. int cid)
  451. {
  452. struct rapl_domain *rd;
  453. int id;
  454. rd = power_zone_to_rapl_domain(power_zone);
  455. id = contraint_to_pl(rd, cid);
  456. if (id >= 0)
  457. return rd->rpl[id].name;
  458. return NULL;
  459. }
  460. static int get_max_power(struct powercap_zone *power_zone, int cid, u64 *data)
  461. {
  462. struct rapl_domain *rd;
  463. u64 val;
  464. int ret = 0;
  465. int id;
  466. cpus_read_lock();
  467. rd = power_zone_to_rapl_domain(power_zone);
  468. id = contraint_to_pl(rd, cid);
  469. ret = rapl_read_pl_data(rd, id, PL_MAX_POWER, true, &val);
  470. if (!ret)
  471. *data = val;
  472. /* As a generalization rule, PL4 would be around two times PL2. */
  473. if (id == POWER_LIMIT4)
  474. *data = *data * 2;
  475. cpus_read_unlock();
  476. return ret;
  477. }
/* Constraint callbacks shared by every RAPL powercap zone. */
static const struct powercap_zone_constraint_ops constraint_ops = {
	.set_power_limit_uw = set_power_limit,
	.get_power_limit_uw = get_current_power_limit,
	.set_time_window_us = set_time_window,
	.get_time_window_us = get_time_window,
	.get_max_power_uw = get_max_power,
	.get_name = get_constraint_name,
};
  486. /* Return the id used for read_raw/write_raw callback */
  487. static int get_rid(struct rapl_package *rp)
  488. {
  489. return rp->lead_cpu >= 0 ? rp->lead_cpu : rp->id;
  490. }
  491. /* called after domain detection and package level data are set */
/*
 * Populate the pre-allocated rp->domains array: one entry per bit set in
 * rp->domain_map, in domain-type order.  Fills in the zone name, valid
 * power limits and per-domain register addresses.
 */
static void rapl_init_domains(struct rapl_package *rp)
{
	enum rapl_domain_type i;
	enum rapl_domain_reg_id j;
	struct rapl_domain *rd = rp->domains;

	for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
		unsigned int mask = rp->domain_map & (1 << i);
		int t;

		if (!mask)
			continue;

		rd->rp = rp;

		/*
		 * Psys zones on non-first packages get a numeric suffix,
		 * presumably to keep zone names unique across packages.
		 */
		if (i == RAPL_DOMAIN_PLATFORM && rp->id > 0) {
			snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "psys-%d",
				 rp->lead_cpu >= 0 ? topology_physical_package_id(rp->lead_cpu) :
				 rp->id);
		} else {
			snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "%s",
				 rapl_domain_names[i]);
		}

		rd->id = i;

		/* PL1 is supported by default */
		rp->priv->limits[i] |= BIT(POWER_LIMIT1);

		/* Naming a power limit marks it valid (see is_pl_valid()). */
		for (t = POWER_LIMIT1; t < NR_POWER_LIMITS; t++) {
			if (rp->priv->limits[i] & BIT(t))
				rd->rpl[t].name = pl_names[t];
		}

		/* Copy the per-domain register addresses from the interface. */
		for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++)
			rd->regs[j] = rp->priv->regs[i][j];

		rd++;
	}
}
  523. static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
  524. u64 value, int to_raw)
  525. {
  526. u64 units = 1;
  527. struct rapl_defaults *defaults = get_defaults(rd->rp);
  528. u64 scale = 1;
  529. switch (type) {
  530. case POWER_UNIT:
  531. units = rd->power_unit;
  532. break;
  533. case ENERGY_UNIT:
  534. scale = ENERGY_UNIT_SCALE;
  535. units = rd->energy_unit;
  536. break;
  537. case TIME_UNIT:
  538. return defaults->compute_time_window(rd, value, to_raw);
  539. case ARBITRARY_UNIT:
  540. default:
  541. return value;
  542. }
  543. if (to_raw)
  544. return div64_u64(value, units) * scale;
  545. value *= units;
  546. return div64_u64(value, scale);
  547. }
  548. /* RAPL primitives for MSR and MMIO I/F */
static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = {
	/* name, mask, shift, msr index, unit divisor */
	[POWER_LIMIT1] = PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
			    RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
	[POWER_LIMIT2] = PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
			    RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
	[POWER_LIMIT4] = PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0,
			    RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
	[ENERGY_COUNTER] = PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
			    RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0),
	[FW_LOCK] = PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	/* NOTE(review): .name stringifies as "FW_LOCK" here too — confirm intended */
	[FW_HIGH_LOCK] = PRIMITIVE_INFO_INIT(FW_LOCK, POWER_HIGH_LOCK, 63,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[PL1_ENABLE] = PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[PL1_CLAMP] = PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[PL2_ENABLE] = PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
			    RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
	[TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
			    RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
	[THERMAL_SPEC_POWER] = PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
			    0, RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
	[MAX_POWER] = PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
			    RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
	[MIN_POWER] = PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
			    RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
	[MAX_TIME_WINDOW] = PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
			    RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0),
	[THROTTLED_TIME] = PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
			    RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0),
	[PRIORITY_LEVEL] = PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
			    RAPL_DOMAIN_REG_POLICY, ARBITRARY_UNIT, 0),
	/* SPR Psys variants: wider limits, shifted enable/time-window bits */
	[PSYS_POWER_LIMIT1] = PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT1, PSYS_POWER_LIMIT1_MASK, 0,
			    RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
	[PSYS_POWER_LIMIT2] = PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT2, PSYS_POWER_LIMIT2_MASK, 32,
			    RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
	[PSYS_PL1_ENABLE] = PRIMITIVE_INFO_INIT(PSYS_PL1_ENABLE, PSYS_POWER_LIMIT1_ENABLE, 17,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[PSYS_PL2_ENABLE] = PRIMITIVE_INFO_INIT(PSYS_PL2_ENABLE, PSYS_POWER_LIMIT2_ENABLE, 49,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[PSYS_TIME_WINDOW1] = PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW1, PSYS_TIME_WINDOW1_MASK, 19,
			    RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
	[PSYS_TIME_WINDOW2] = PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW2, PSYS_TIME_WINDOW2_MASK, 51,
			    RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
	/* non-hardware */
	[AVERAGE_POWER] = PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
			    RAPL_PRIMITIVE_DERIVED),
};
  603. /* RAPL primitives for TPMI I/F */
static struct rapl_primitive_info rpi_tpmi[NR_RAPL_PRIMITIVES] = {
	/* name, mask, shift, msr index, unit divisor */
	/* TPMI uses one register per power limit, so limit masks/shifts repeat. */
	[POWER_LIMIT1] = PRIMITIVE_INFO_INIT(POWER_LIMIT1, TPMI_POWER_LIMIT_MASK, 0,
			    RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
	[POWER_LIMIT2] = PRIMITIVE_INFO_INIT(POWER_LIMIT2, TPMI_POWER_LIMIT_MASK, 0,
			    RAPL_DOMAIN_REG_PL2, POWER_UNIT, 0),
	[POWER_LIMIT4] = PRIMITIVE_INFO_INIT(POWER_LIMIT4, TPMI_POWER_LIMIT_MASK, 0,
			    RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
	[ENERGY_COUNTER] = PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
			    RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0),
	/* Per-limit lock bits, all at bit 63 of their respective registers. */
	[PL1_LOCK] = PRIMITIVE_INFO_INIT(PL1_LOCK, POWER_HIGH_LOCK, 63,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[PL2_LOCK] = PRIMITIVE_INFO_INIT(PL2_LOCK, POWER_HIGH_LOCK, 63,
			    RAPL_DOMAIN_REG_PL2, ARBITRARY_UNIT, 0),
	[PL4_LOCK] = PRIMITIVE_INFO_INIT(PL4_LOCK, POWER_HIGH_LOCK, 63,
			    RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
	[PL1_ENABLE] = PRIMITIVE_INFO_INIT(PL1_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
			    RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
	[PL2_ENABLE] = PRIMITIVE_INFO_INIT(PL2_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
			    RAPL_DOMAIN_REG_PL2, ARBITRARY_UNIT, 0),
	[PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
			    RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
	[TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TPMI_TIME_WINDOW_MASK, 18,
			    RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
	[TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TPMI_TIME_WINDOW_MASK, 18,
			    RAPL_DOMAIN_REG_PL2, TIME_UNIT, 0),
	[THERMAL_SPEC_POWER] = PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, TPMI_INFO_SPEC_MASK, 0,
			    RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
	[MAX_POWER] = PRIMITIVE_INFO_INIT(MAX_POWER, TPMI_INFO_MAX_MASK, 36,
			    RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
	[MIN_POWER] = PRIMITIVE_INFO_INIT(MIN_POWER, TPMI_INFO_MIN_MASK, 18,
			    RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
	[MAX_TIME_WINDOW] = PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, TPMI_INFO_MAX_TIME_WIN_MASK, 54,
			    RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0),
	[THROTTLED_TIME] = PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
			    RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0),
	/* non-hardware */
	[AVERAGE_POWER] = PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0,
			    POWER_UNIT, RAPL_PRIMITIVE_DERIVED),
};
  644. static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim)
  645. {
  646. struct rapl_primitive_info *rpi = rp->priv->rpi;
  647. if (prim < 0 || prim >= NR_RAPL_PRIMITIVES || !rpi)
  648. return NULL;
  649. return &rpi[prim];
  650. }
  651. static int rapl_config(struct rapl_package *rp)
  652. {
  653. switch (rp->priv->type) {
  654. /* MMIO I/F shares the same register layout as MSR registers */
  655. case RAPL_IF_MMIO:
  656. case RAPL_IF_MSR:
  657. rp->priv->defaults = (void *)defaults_msr;
  658. rp->priv->rpi = (void *)rpi_msr;
  659. break;
  660. case RAPL_IF_TPMI:
  661. rp->priv->defaults = (void *)&defaults_tpmi;
  662. rp->priv->rpi = (void *)rpi_tpmi;
  663. break;
  664. default:
  665. return -EINVAL;
  666. }
  667. /* defaults_msr can be NULL on unsupported platforms */
  668. if (!rp->priv->defaults || !rp->priv->rpi)
  669. return -ENODEV;
  670. return 0;
  671. }
  672. static enum rapl_primitives
  673. prim_fixups(struct rapl_domain *rd, enum rapl_primitives prim)
  674. {
  675. struct rapl_defaults *defaults = get_defaults(rd->rp);
  676. if (!defaults->spr_psys_bits)
  677. return prim;
  678. if (rd->id != RAPL_DOMAIN_PLATFORM)
  679. return prim;
  680. switch (prim) {
  681. case POWER_LIMIT1:
  682. return PSYS_POWER_LIMIT1;
  683. case POWER_LIMIT2:
  684. return PSYS_POWER_LIMIT2;
  685. case PL1_ENABLE:
  686. return PSYS_PL1_ENABLE;
  687. case PL2_ENABLE:
  688. return PSYS_PL2_ENABLE;
  689. case TIME_WINDOW1:
  690. return PSYS_TIME_WINDOW1;
  691. case TIME_WINDOW2:
  692. return PSYS_TIME_WINDOW2;
  693. default:
  694. return prim;
  695. }
  696. }
/*
 * Read primitive data based on its related struct rapl_primitive_info.
 * If the xlate flag is set, return translated data based on the data
 * unit, i.e. time, energy, or power.
 *
 * RAPL MSRs are non-architectural and are not laid out consistently
 * across domains. The primitive info tables allow writing consolidated
 * access functions: for a given primitive, the raw register value is
 * masked and shifted per its table entry. Unit conversion factors are
 * pre-assigned from the RAPL unit MSRs read at init time.
 *
 *   63 -------------------------- 31 -------------------------- 0
 *   |          xxxxx (mask)          |                          |
 *   |<------------------ shift ----->|                          |
 *   63 -------------------------- 31 -------------------------- 0
 *
 * Returns 0 on success, -EINVAL for an invalid/dummy primitive or a
 * missing register, -EIO when the raw register read fails.
 */
static int rapl_read_data_raw(struct rapl_domain *rd,
			      enum rapl_primitives prim, bool xlate, u64 *data)
{
	u64 value;
	/* PSYS domain on SPR-style parts uses remapped primitives */
	enum rapl_primitives prim_fixed = prim_fixups(rd, prim);
	struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed);
	struct reg_action ra;

	if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY)
		return -EINVAL;

	/* a zero register value means the domain has no such register */
	ra.reg = rd->regs[rpi->id];
	if (!ra.reg.val)
		return -EINVAL;

	/* non-hardware data are collected by the polling thread */
	if (rpi->flag & RAPL_PRIMITIVE_DERIVED) {
		/* note: cached under the original prim, not prim_fixed */
		*data = rd->rdd.primitives[prim];
		return 0;
	}

	ra.mask = rpi->mask;

	if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
		pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg.val, rd->rp->name, rd->name);
		return -EIO;
	}

	value = ra.value >> rpi->shift;

	if (xlate)
		*data = rapl_unit_xlate(rd, rpi->unit, value, 0);
	else
		*data = value;

	return 0;
}
  739. /* Similar use of primitive info in the read counterpart */
  740. static int rapl_write_data_raw(struct rapl_domain *rd,
  741. enum rapl_primitives prim,
  742. unsigned long long value)
  743. {
  744. enum rapl_primitives prim_fixed = prim_fixups(rd, prim);
  745. struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed);
  746. u64 bits;
  747. struct reg_action ra;
  748. int ret;
  749. if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY)
  750. return -EINVAL;
  751. bits = rapl_unit_xlate(rd, rpi->unit, value, 1);
  752. bits <<= rpi->shift;
  753. bits &= rpi->mask;
  754. memset(&ra, 0, sizeof(ra));
  755. ra.reg = rd->regs[rpi->id];
  756. ra.mask = rpi->mask;
  757. ra.value = bits;
  758. ret = rd->rp->priv->write_raw(get_rid(rd->rp), &ra);
  759. return ret;
  760. }
  761. static int rapl_read_pl_data(struct rapl_domain *rd, int pl,
  762. enum pl_prims pl_prim, bool xlate, u64 *data)
  763. {
  764. enum rapl_primitives prim = get_pl_prim(rd, pl, pl_prim);
  765. if (!is_pl_valid(rd, pl))
  766. return -EINVAL;
  767. return rapl_read_data_raw(rd, prim, xlate, data);
  768. }
  769. static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
  770. enum pl_prims pl_prim,
  771. unsigned long long value)
  772. {
  773. enum rapl_primitives prim = get_pl_prim(rd, pl, pl_prim);
  774. if (!is_pl_valid(rd, pl))
  775. return -EINVAL;
  776. if (rd->rpl[pl].locked) {
  777. pr_debug("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
  778. return -EACCES;
  779. }
  780. return rapl_write_data_raw(rd, prim, value);
  781. }
/*
 * Raw RAPL data stored in MSRs are in certain scales. We need to
 * convert them into standard units based on the units reported in
 * the RAPL unit MSRs. This is specific to CPUs as the method to
 * calculate units differs between CPU families.
 *
 * We convert the units to the below format:
 *   energy unit: picoJoules
 *   power unit : microWatts
 *   time unit  : microseconds
 */
static int rapl_check_unit_core(struct rapl_domain *rd)
{
	struct reg_action ra;
	u32 value;

	ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
	ra.mask = ~0;
	if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
		pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
		       ra.reg.val, rd->rp->name, rd->name);
		return -ENODEV;
	}

	/* energy unit is 1/2^value Joules, stored as picoJoules */
	value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
	rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);

	/* power unit is 1/2^value Watts, stored as microWatts */
	value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
	rd->power_unit = 1000000 / (1 << value);

	/* time unit is 1/2^value seconds, stored as microseconds */
	value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
	rd->time_unit = 1000000 / (1 << value);

	pr_debug("Core CPU %s:%s energy=%dpJ, time=%dus, power=%duW\n",
		 rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);

	return 0;
}
  814. static int rapl_check_unit_atom(struct rapl_domain *rd)
  815. {
  816. struct reg_action ra;
  817. u32 value;
  818. ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
  819. ra.mask = ~0;
  820. if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
  821. pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
  822. ra.reg.val, rd->rp->name, rd->name);
  823. return -ENODEV;
  824. }
  825. value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
  826. rd->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
  827. value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
  828. rd->power_unit = (1 << value) * 1000;
  829. value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
  830. rd->time_unit = 1000000 / (1 << value);
  831. pr_debug("Atom %s:%s energy=%dpJ, time=%dus, power=%duW\n",
  832. rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);
  833. return 0;
  834. }
/*
 * Runs on the package's lead CPU (via smp_call_function_single() from
 * package_power_limit_irq_save()): record the current PLN interrupt
 * enable bit once, then mask the interrupt.
 */
static void power_limit_irq_save_cpu(void *info)
{
	u32 l, h = 0;
	struct rapl_package *rp = (struct rapl_package *)info;

	/* save the state of PLN irq mask bit before disabling it */
	rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
		/* only the first call latches the original enable state */
		rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
		rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
	}
	l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
	wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}
  848. /* REVISIT:
  849. * When package power limit is set artificially low by RAPL, LVT
  850. * thermal interrupt for package power limit should be ignored
  851. * since we are not really exceeding the real limit. The intention
  852. * is to avoid excessive interrupts while we are trying to save power.
  853. * A useful feature might be routing the package_power_limit interrupt
  854. * to userspace via eventfd. once we have a usecase, this is simple
  855. * to do by adding an atomic notifier.
  856. */
  857. static void package_power_limit_irq_save(struct rapl_package *rp)
  858. {
  859. if (rp->lead_cpu < 0)
  860. return;
  861. if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
  862. return;
  863. smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1);
  864. }
  865. /*
  866. * Restore per package power limit interrupt enable state. Called from cpu
  867. * hotplug code on package removal.
  868. */
  869. static void package_power_limit_irq_restore(struct rapl_package *rp)
  870. {
  871. u32 l, h;
  872. if (rp->lead_cpu < 0)
  873. return;
  874. if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
  875. return;
  876. /* irq enable state not saved, nothing to restore */
  877. if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
  878. return;
  879. rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
  880. if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
  881. l |= PACKAGE_THERM_INT_PLN_ENABLE;
  882. else
  883. l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
  884. wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
  885. }
  886. static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
  887. {
  888. int i;
  889. /* always enable clamp such that p-state can go below OS requested
  890. * range. power capping priority over guranteed frequency.
  891. */
  892. rapl_write_pl_data(rd, POWER_LIMIT1, PL_CLAMP, mode);
  893. for (i = POWER_LIMIT2; i < NR_POWER_LIMITS; i++) {
  894. rapl_write_pl_data(rd, i, PL_ENABLE, mode);
  895. rapl_write_pl_data(rd, i, PL_CLAMP, mode);
  896. }
  897. }
/*
 * Atom floor-frequency handling via the IOSF sideband power budget
 * control register. When enabling, bits [14:8] are set to 1; when
 * disabling, the originally-read register value is written back.
 */
static void set_floor_freq_atom(struct rapl_domain *rd, bool enable)
{
	/*
	 * Pristine register value, read lazily on first use and cached
	 * for all later calls.
	 * NOTE(review): not serialized here — assumes callers cannot race
	 * on first use; confirm against the call sites.
	 */
	static u32 power_ctrl_orig_val;
	struct rapl_defaults *defaults = get_defaults(rd->rp);
	u32 mdata;

	if (!defaults->floor_freq_reg_addr) {
		pr_err("Invalid floor frequency config register\n");
		return;
	}

	if (!power_ctrl_orig_val)
		iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_CR_READ,
			      defaults->floor_freq_reg_addr,
			      &power_ctrl_orig_val);
	mdata = power_ctrl_orig_val;
	if (enable) {
		/* clear the 7-bit field at [14:8], then set it to 1 */
		mdata &= ~(0x7f << 8);
		mdata |= 1 << 8;
	}
	iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_CR_WRITE,
		       defaults->floor_freq_reg_addr, mdata);
}
/*
 * Convert a time window between its raw 7-bit hardware encoding and
 * time units (and back when @to_raw is set).
 *
 * Special processing based on 2^Y*(1+F/4): Y in bits [4:0], F in
 * bits [6:5]. Refer to the Intel Software Developer's Manual Vol.3B,
 * Ch. 14.9.3.
 */
static u64 rapl_compute_time_window_core(struct rapl_domain *rd, u64 value,
					 bool to_raw)
{
	u64 f, y; /* fraction and exp. used for time unit */

	if (!to_raw) {
		/* decode: value = 2^Y * (4 + F) * time_unit / 4 */
		f = (value & 0x60) >> 5;
		y = value & 0x1f;
		value = (1 << y) * (4 + f) * rd->time_unit / 4;
	} else {
		/* windows below one time unit cannot be encoded */
		if (value < rd->time_unit)
			return 0;

		/* do_div() divides @value in place */
		do_div(value, rd->time_unit);
		y = ilog2(value);

		/*
		 * The target hardware field is 7 bits wide, so return all ones
		 * if the exponent is too large.
		 */
		if (y > 0x1f)
			return 0x7f;

		/* fraction of the remainder above 2^Y, in quarters */
		f = div64_u64(4 * (value - (1ULL << y)), 1ULL << y);
		value = (y & 0x1f) | ((f & 0x3) << 5);
	}
	return value;
}
  947. static u64 rapl_compute_time_window_atom(struct rapl_domain *rd, u64 value,
  948. bool to_raw)
  949. {
  950. /*
  951. * Atom time unit encoding is straight forward val * time_unit,
  952. * where time_unit is default to 1 sec. Never 0.
  953. */
  954. if (!to_raw)
  955. return (value) ? value * rd->time_unit : rd->time_unit;
  956. value = div64_u64(value, rd->time_unit);
  957. return value;
  958. }
/* TPMI Unit register has a different layout from the MSR Unit register */
#define TPMI_POWER_UNIT_OFFSET	POWER_UNIT_OFFSET
#define TPMI_POWER_UNIT_MASK	POWER_UNIT_MASK
#define TPMI_ENERGY_UNIT_OFFSET	0x06
#define TPMI_ENERGY_UNIT_MASK	0x7C0
#define TPMI_TIME_UNIT_OFFSET	0x0C
#define TPMI_TIME_UNIT_MASK	0xF000

/*
 * TPMI variant of the unit decoding: same 1/2^value scaling as the Core
 * MSR layout, but with the TPMI field offsets/masks above.
 */
static int rapl_check_unit_tpmi(struct rapl_domain *rd)
{
	struct reg_action ra;
	u32 value;

	ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
	ra.mask = ~0;
	if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
		pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
		       ra.reg.val, rd->rp->name, rd->name);
		return -ENODEV;
	}

	/* energy unit in picoJoules */
	value = (ra.value & TPMI_ENERGY_UNIT_MASK) >> TPMI_ENERGY_UNIT_OFFSET;
	rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);

	/* power unit in microWatts */
	value = (ra.value & TPMI_POWER_UNIT_MASK) >> TPMI_POWER_UNIT_OFFSET;
	rd->power_unit = 1000000 / (1 << value);

	/* time unit in microseconds */
	value = (ra.value & TPMI_TIME_UNIT_MASK) >> TPMI_TIME_UNIT_OFFSET;
	rd->time_unit = 1000000 / (1 << value);

	/* note: message prefix kept as "Core CPU" for log compatibility */
	pr_debug("Core CPU %s:%s energy=%dpJ, time=%dus, power=%duW\n",
		 rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);

	return 0;
}
/* TPMI I/F defaults */
static const struct rapl_defaults defaults_tpmi = {
	.check_unit = rapl_check_unit_tpmi,
	/* Reuse existing logic, ignore the PL_CLAMP failures and enable all Power Limits */
	.set_floor_freq = set_floor_freq_default,
	.compute_time_window = rapl_compute_time_window_core,
};

/* Generic Core defaults */
static const struct rapl_defaults rapl_defaults_core = {
	.floor_freq_reg_addr = 0,
	.check_unit = rapl_check_unit_core,
	.set_floor_freq = set_floor_freq_default,
	.compute_time_window = rapl_compute_time_window_core,
};

/* Haswell-style servers use a fixed 15.3 uJ DRAM energy unit */
static const struct rapl_defaults rapl_defaults_hsw_server = {
	.check_unit = rapl_check_unit_core,
	.set_floor_freq = set_floor_freq_default,
	.compute_time_window = rapl_compute_time_window_core,
	.dram_domain_energy_unit = 15300,
};

/* Sapphire Rapids servers: fixed PSYS energy unit and PSYS bit remap */
static const struct rapl_defaults rapl_defaults_spr_server = {
	.check_unit = rapl_check_unit_core,
	.set_floor_freq = set_floor_freq_default,
	.compute_time_window = rapl_compute_time_window_core,
	.psys_domain_energy_unit = 1000000000,
	.spr_psys_bits = true,
};

/* Baytrail: floor frequency driven through the IOSF sideband */
static const struct rapl_defaults rapl_defaults_byt = {
	.floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_BYT,
	.check_unit = rapl_check_unit_atom,
	.set_floor_freq = set_floor_freq_atom,
	.compute_time_window = rapl_compute_time_window_atom,
};

/* Tangier: same scheme as Baytrail with its own IOSF register */
static const struct rapl_defaults rapl_defaults_tng = {
	.floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_TNG,
	.check_unit = rapl_check_unit_atom,
	.set_floor_freq = set_floor_freq_atom,
	.compute_time_window = rapl_compute_time_window_atom,
};

/* Anniedale: Atom units, no floor frequency control */
static const struct rapl_defaults rapl_defaults_ann = {
	.floor_freq_reg_addr = 0,
	.check_unit = rapl_check_unit_atom,
	.set_floor_freq = NULL,
	.compute_time_window = rapl_compute_time_window_atom,
};

/* Cherrytrail: Atom units, no floor frequency control */
static const struct rapl_defaults rapl_defaults_cht = {
	.floor_freq_reg_addr = 0,
	.check_unit = rapl_check_unit_atom,
	.set_floor_freq = NULL,
	.compute_time_window = rapl_compute_time_window_atom,
};

/* AMD/Hygon: unit decoding only; no floor freq or time window support */
static const struct rapl_defaults rapl_defaults_amd = {
	.check_unit = rapl_check_unit_core,
};
/* CPU models supported by this driver, matched in table order */
static const struct x86_cpu_id rapl_ids[] __initconst = {
	/* Intel big-core clients and servers */
	X86_MATCH_VFM(INTEL_SANDYBRIDGE,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_IVYBRIDGE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_HASWELL,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_HASWELL_L,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_HASWELL_G,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_HASWELL_X,		&rapl_defaults_hsw_server),
	X86_MATCH_VFM(INTEL_BROADWELL,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_BROADWELL_G,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_BROADWELL_D,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_BROADWELL_X,	&rapl_defaults_hsw_server),
	X86_MATCH_VFM(INTEL_SKYLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		&rapl_defaults_hsw_server),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_KABYLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_CANNONLAKE_L,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ICELAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		&rapl_defaults_hsw_server),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		&rapl_defaults_hsw_server),
	X86_MATCH_VFM(INTEL_COMETLAKE_L,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_COMETLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ROCKETLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_RAPTORLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_METEORLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	&rapl_defaults_spr_server),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	&rapl_defaults_spr_server),
	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ARROWLAKE,		&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ARROWLAKE_U,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_LAKEFIELD,		&rapl_defaults_core),

	/* Intel Atom parts */
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT,	&rapl_defaults_byt),
	X86_MATCH_VFM(INTEL_ATOM_AIRMONT,	&rapl_defaults_cht),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &rapl_defaults_tng),
	X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID,	&rapl_defaults_ann),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_D,	&rapl_defaults_core),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_L,	&rapl_defaults_core),

	/* Xeon Phi */
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL,	&rapl_defaults_hsw_server),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM,	&rapl_defaults_hsw_server),

	/* AMD and Hygon, matched by family */
	X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd),
	X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd),
	X86_MATCH_VENDOR_FAM(AMD, 0x1A, &rapl_defaults_amd),
	X86_MATCH_VENDOR_FAM(HYGON, 0x18, &rapl_defaults_amd),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
/*
 * Read once for all raw primitive data for every domain in the package
 * and cache the values in each domain's rdd.primitives[].
 */
static void rapl_update_domain_data(struct rapl_package *rp)
{
	int dmn, prim;
	u64 val;

	for (dmn = 0; dmn < rp->nr_domains; dmn++) {
		pr_debug("update %s domain %s data\n", rp->name,
			 rp->domains[dmn].name);
		/* exclude non-raw primitives */
		for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) {
			struct rapl_primitive_info *rpi = get_rpi(rp, prim);

			/*
			 * NOTE(review): rpi->unit is passed where a bool
			 * xlate flag is expected, so any primitive with a
			 * nonzero unit enum is cached translated — confirm
			 * intentional. Failed reads leave the cached value
			 * untouched.
			 */
			if (!rapl_read_data_raw(&rp->domains[dmn], prim,
						rpi->unit, &val))
				rp->domains[dmn].rdd.primitives[prim] = val;
		}
	}
}
/*
 * Register the package's domains with the powercap framework: the
 * package domain becomes the parent zone and all other domains are
 * registered as its children (except the platform domain, which is
 * registered at top level). On a child registration failure, every
 * previously-registered child zone is unregistered.
 *
 * Returns 0 on success, -ENODEV when no package domain exists, or the
 * powercap registration error.
 */
static int rapl_package_register_powercap(struct rapl_package *rp)
{
	struct rapl_domain *rd;
	struct powercap_zone *power_zone = NULL;
	int nr_pl, ret;

	/* Update the domain data of the new package */
	rapl_update_domain_data(rp);

	/* first we register package domain as the parent zone */
	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
		if (rd->id == RAPL_DOMAIN_PACKAGE) {
			nr_pl = find_nr_power_limit(rd);
			pr_debug("register package domain %s\n", rp->name);
			power_zone = powercap_register_zone(&rd->power_zone,
					    rp->priv->control_type, rp->name,
					    NULL, &zone_ops[rd->id], nr_pl,
					    &constraint_ops);
			if (IS_ERR(power_zone)) {
				pr_debug("failed to register power zone %s\n",
					 rp->name);
				return PTR_ERR(power_zone);
			}
			/* track parent zone in per package/socket data */
			rp->power_zone = power_zone;
			/* done, only one package domain per socket */
			break;
		}
	}
	if (!power_zone) {
		pr_err("no package domain found, unknown topology!\n");
		return -ENODEV;
	}
	/* now register domains as children of the socket/package */
	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
		struct powercap_zone *parent = rp->power_zone;

		if (rd->id == RAPL_DOMAIN_PACKAGE)
			continue;
		/* the platform domain is registered without a parent */
		if (rd->id == RAPL_DOMAIN_PLATFORM)
			parent = NULL;
		/* number of power limits per domain varies */
		nr_pl = find_nr_power_limit(rd);
		power_zone = powercap_register_zone(&rd->power_zone,
						    rp->priv->control_type,
						    rd->name, parent,
						    &zone_ops[rd->id], nr_pl,
						    &constraint_ops);
		if (IS_ERR(power_zone)) {
			pr_debug("failed to register power_zone, %s:%s\n",
				 rp->name, rd->name);
			ret = PTR_ERR(power_zone);
			goto err_cleanup;
		}
	}
	return 0;

err_cleanup:
	/*
	 * Clean up previously initialized domains within the package if we
	 * failed after the first domain setup.
	 */
	while (--rd >= rp->domains) {
		pr_debug("unregister %s domain %s\n", rp->name, rd->name);
		powercap_unregister_zone(rp->priv->control_type,
					 &rd->power_zone);
	}

	return ret;
}
  1184. static int rapl_check_domain(int domain, struct rapl_package *rp)
  1185. {
  1186. struct reg_action ra;
  1187. switch (domain) {
  1188. case RAPL_DOMAIN_PACKAGE:
  1189. case RAPL_DOMAIN_PP0:
  1190. case RAPL_DOMAIN_PP1:
  1191. case RAPL_DOMAIN_DRAM:
  1192. case RAPL_DOMAIN_PLATFORM:
  1193. ra.reg = rp->priv->regs[domain][RAPL_DOMAIN_REG_STATUS];
  1194. break;
  1195. default:
  1196. pr_err("invalid domain id %d\n", domain);
  1197. return -EINVAL;
  1198. }
  1199. /* make sure domain counters are available and contains non-zero
  1200. * values, otherwise skip it.
  1201. */
  1202. ra.mask = ENERGY_STATUS_MASK;
  1203. if (rp->priv->read_raw(get_rid(rp), &ra) || !ra.value)
  1204. return -ENODEV;
  1205. return 0;
  1206. }
  1207. /*
  1208. * Get per domain energy/power/time unit.
  1209. * RAPL Interfaces without per domain unit register will use the package
  1210. * scope unit register to set per domain units.
  1211. */
  1212. static int rapl_get_domain_unit(struct rapl_domain *rd)
  1213. {
  1214. struct rapl_defaults *defaults = get_defaults(rd->rp);
  1215. int ret;
  1216. if (!rd->regs[RAPL_DOMAIN_REG_UNIT].val) {
  1217. if (!rd->rp->priv->reg_unit.val) {
  1218. pr_err("No valid Unit register found\n");
  1219. return -ENODEV;
  1220. }
  1221. rd->regs[RAPL_DOMAIN_REG_UNIT] = rd->rp->priv->reg_unit;
  1222. }
  1223. if (!defaults->check_unit) {
  1224. pr_err("missing .check_unit() callback\n");
  1225. return -ENODEV;
  1226. }
  1227. ret = defaults->check_unit(rd);
  1228. if (ret)
  1229. return ret;
  1230. if (rd->id == RAPL_DOMAIN_DRAM && defaults->dram_domain_energy_unit)
  1231. rd->energy_unit = defaults->dram_domain_energy_unit;
  1232. if (rd->id == RAPL_DOMAIN_PLATFORM && defaults->psys_domain_energy_unit)
  1233. rd->energy_unit = defaults->psys_domain_energy_unit;
  1234. return 0;
  1235. }
  1236. /*
  1237. * Check if power limits are available. Two cases when they are not available:
  1238. * 1. Locked by BIOS, in this case we still provide read-only access so that
  1239. * users can see what limit is set by the BIOS.
  1240. * 2. Some CPUs make some domains monitoring only which means PLx MSRs may not
  1241. * exist at all. In this case, we do not show the constraints in powercap.
  1242. *
  1243. * Called after domains are detected and initialized.
  1244. */
  1245. static void rapl_detect_powerlimit(struct rapl_domain *rd)
  1246. {
  1247. u64 val64;
  1248. int i;
  1249. for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
  1250. if (!rapl_read_pl_data(rd, i, PL_LOCK, false, &val64)) {
  1251. if (val64) {
  1252. rd->rpl[i].locked = true;
  1253. pr_info("%s:%s:%s locked by BIOS\n",
  1254. rd->rp->name, rd->name, pl_names[i]);
  1255. }
  1256. }
  1257. if (rapl_read_pl_data(rd, i, PL_LIMIT, false, &val64))
  1258. rd->rpl[i].name = NULL;
  1259. }
  1260. }
  1261. /* Detect active and valid domains for the given CPU, caller must
  1262. * ensure the CPU belongs to the targeted package and CPU hotlug is disabled.
  1263. */
  1264. static int rapl_detect_domains(struct rapl_package *rp)
  1265. {
  1266. struct rapl_domain *rd;
  1267. int i;
  1268. for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
  1269. /* use physical package id to read counters */
  1270. if (!rapl_check_domain(i, rp)) {
  1271. rp->domain_map |= 1 << i;
  1272. pr_info("Found RAPL domain %s\n", rapl_domain_names[i]);
  1273. }
  1274. }
  1275. rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
  1276. if (!rp->nr_domains) {
  1277. pr_debug("no valid rapl domains found in %s\n", rp->name);
  1278. return -ENODEV;
  1279. }
  1280. pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name);
  1281. rp->domains = kcalloc(rp->nr_domains, sizeof(struct rapl_domain),
  1282. GFP_KERNEL);
  1283. if (!rp->domains)
  1284. return -ENOMEM;
  1285. rapl_init_domains(rp);
  1286. for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
  1287. rapl_get_domain_unit(rd);
  1288. rapl_detect_powerlimit(rd);
  1289. }
  1290. return 0;
  1291. }
  1292. #ifdef CONFIG_PERF_EVENTS
  1293. /*
  1294. * Support for RAPL PMU
  1295. *
  1296. * Register a PMU if any of the registered RAPL Packages have the requirement
  1297. * of exposing its energy counters via Perf PMU.
  1298. *
  1299. * PMU Name:
  1300. * power
  1301. *
  1302. * Events:
  1303. * Name Event id RAPL Domain
  1304. * energy_cores 0x01 RAPL_DOMAIN_PP0
  1305. * energy_pkg 0x02 RAPL_DOMAIN_PACKAGE
  1306. * energy_ram 0x03 RAPL_DOMAIN_DRAM
  1307. * energy_gpu 0x04 RAPL_DOMAIN_PP1
  1308. * energy_psys 0x05 RAPL_DOMAIN_PLATFORM
  1309. *
  1310. * Unit:
  1311. * Joules
  1312. *
  1313. * Scale:
  1314. * 2.3283064365386962890625e-10
  1315. * The same RAPL domain in different RAPL Packages may have different
  1316. * energy units. Use 2.3283064365386962890625e-10 (2^-32) Joules as
 * the fixed unit for all energy counters, and convert each hardware
  1318. * counter increase to N times of PMU event counter increases.
  1319. *
  1320. * This is fully compatible with the current MSR RAPL PMU. This means that
  1321. * userspace programs like turbostat can use the same code to handle RAPL Perf
  1322. * PMU, no matter what RAPL Interface driver (MSR/TPMI, etc) is running
  1323. * underlying on the platform.
  1324. *
  1325. * Note that RAPL Packages can be probed/removed dynamically, and the events
  1326. * supported by each TPMI RAPL device can be different. Thus the RAPL PMU
  1327. * support is done on demand, which means
  1328. * 1. PMU is registered only if it is needed by a RAPL Package. PMU events for
  1329. * unsupported counters are not exposed.
  1330. * 2. PMU is unregistered and registered when a new RAPL Package is probed and
  1331. * supports new counters that are not supported by current PMU.
  1332. * 3. PMU is unregistered when all registered RAPL Packages don't need PMU.
  1333. */
/* Driver-wide PMU state; a single instance is shared by all packages */
struct rapl_pmu {
	struct pmu pmu;			/* Perf PMU structure */
	u64 timer_ms;			/* Maximum expiration time to avoid counter overflow */
	unsigned long domain_map;	/* Events supported by current registered PMU */
	bool registered;		/* Whether the PMU has been registered or not */
};

static struct rapl_pmu rapl_pmu;
  1341. /* PMU helpers */
  1342. static int get_pmu_cpu(struct rapl_package *rp)
  1343. {
  1344. int cpu;
  1345. if (!rp->has_pmu)
  1346. return nr_cpu_ids;
  1347. /* Only TPMI RAPL is supported for now */
  1348. if (rp->priv->type != RAPL_IF_TPMI)
  1349. return nr_cpu_ids;
  1350. /* TPMI RAPL uses any CPU in the package for PMU */
  1351. for_each_online_cpu(cpu)
  1352. if (topology_physical_package_id(cpu) == rp->id)
  1353. return cpu;
  1354. return nr_cpu_ids;
  1355. }
  1356. static bool is_rp_pmu_cpu(struct rapl_package *rp, int cpu)
  1357. {
  1358. if (!rp->has_pmu)
  1359. return false;
  1360. /* Only TPMI RAPL is supported for now */
  1361. if (rp->priv->type != RAPL_IF_TPMI)
  1362. return false;
  1363. /* TPMI RAPL uses any CPU in the package for PMU */
  1364. return topology_physical_package_id(cpu) == rp->id;
  1365. }
  1366. static struct rapl_package_pmu_data *event_to_pmu_data(struct perf_event *event)
  1367. {
  1368. struct rapl_package *rp = event->pmu_private;
  1369. return &rp->pmu_data;
  1370. }
  1371. /* PMU event callbacks */
  1372. static u64 event_read_counter(struct perf_event *event)
  1373. {
  1374. struct rapl_package *rp = event->pmu_private;
  1375. u64 val;
  1376. int ret;
  1377. /* Return 0 for unsupported events */
  1378. if (event->hw.idx < 0)
  1379. return 0;
  1380. ret = rapl_read_data_raw(&rp->domains[event->hw.idx], ENERGY_COUNTER, false, &val);
  1381. /* Return 0 for failed read */
  1382. if (ret)
  1383. return 0;
  1384. return val;
  1385. }
/*
 * Start a stopped event: snapshot the current counter value, put the
 * event on the package's active list and kick the polling hrtimer when
 * this is the first active event. Caller must hold data->lock.
 */
static void __rapl_pmu_event_start(struct perf_event *event)
{
	struct rapl_package_pmu_data *data = event_to_pmu_data(event);

	/* only a stopped event may be (re)started */
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &data->active_list);

	local64_set(&event->hw.prev_count, event_read_counter(event));
	/* first active event arms the overflow-avoidance timer */
	if (++data->n_active == 1)
		hrtimer_start(&data->hrtimer, data->timer_interval,
			      HRTIMER_MODE_REL_PINNED);
}
  1398. static void rapl_pmu_event_start(struct perf_event *event, int mode)
  1399. {
  1400. struct rapl_package_pmu_data *data = event_to_pmu_data(event);
  1401. unsigned long flags;
  1402. raw_spin_lock_irqsave(&data->lock, flags);
  1403. __rapl_pmu_event_start(event);
  1404. raw_spin_unlock_irqrestore(&data->lock, flags);
  1405. }
/*
 * Fold the counter movement since the last snapshot into event->count and
 * advance the snapshot. Returns the latest raw counter value.
 */
static u64 rapl_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct rapl_package_pmu_data *data = event_to_pmu_data(event);
	u64 prev_raw_count, new_raw_count;
	s64 delta, sdelta;

	/*
	 * Follow the generic code to drain hwc->prev_count.
	 * The loop is not expected to run for multiple times.
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	do {
		new_raw_count = event_read_counter(event);
	} while (!local64_try_cmpxchg(&hwc->prev_count,
				      &prev_raw_count, new_raw_count));

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 */
	delta = new_raw_count - prev_raw_count;

	/*
	 * Scale delta to smallest unit (2^-32)
	 * users must then scale back: count * 1/(1e9*2^32) to get Joules
	 * or use ldexp(count, -32).
	 * Watts = Joules/Time delta
	 *
	 * hw.flags holds the RAPL domain id set in rapl_pmu_event_init(),
	 * which indexes the per-domain scale table.
	 */
	sdelta = delta * data->scale[event->hw.flags];

	local64_add(sdelta, &event->count);

	return new_raw_count;
}
/*
 * pmu::stop callback: deactivate @event and, when PERF_EF_UPDATE is set,
 * flush the final delta into the software count.
 */
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_package_pmu_data *data = event_to_pmu_data(event);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&data->lock, flags);

	/* Mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(data->n_active <= 0);
		/* Last active event: the periodic update timer is not needed */
		if (--data->n_active == 0)
			hrtimer_cancel(&data->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* Check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of a event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	raw_spin_unlock_irqrestore(&data->lock, flags);
}
  1463. static int rapl_pmu_event_add(struct perf_event *event, int mode)
  1464. {
  1465. struct rapl_package_pmu_data *data = event_to_pmu_data(event);
  1466. struct hw_perf_event *hwc = &event->hw;
  1467. unsigned long flags;
  1468. raw_spin_lock_irqsave(&data->lock, flags);
  1469. hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
  1470. if (mode & PERF_EF_START)
  1471. __rapl_pmu_event_start(event);
  1472. raw_spin_unlock_irqrestore(&data->lock, flags);
  1473. return 0;
  1474. }
/* pmu::del callback: stop the event and flush its final count. */
static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}
/* RAPL PMU event ids, same as shown in sysfs */
enum perf_rapl_events {
	PERF_RAPL_PP0 = 1,	/* all cores */
	PERF_RAPL_PKG,		/* entire package */
	PERF_RAPL_RAM,		/* DRAM */
	PERF_RAPL_PP1,		/* gpu */
	PERF_RAPL_PSYS,		/* psys */
	PERF_RAPL_MAX
};

/* Event id occupies the low 8 bits of perf_event_attr::config */
#define RAPL_EVENT_MASK	GENMASK(7, 0)

/* Translate a PMU event id into the corresponding RAPL domain id */
static const int event_to_domain[PERF_RAPL_MAX] = {
	[PERF_RAPL_PP0] = RAPL_DOMAIN_PP0,
	[PERF_RAPL_PKG] = RAPL_DOMAIN_PACKAGE,
	[PERF_RAPL_RAM] = RAPL_DOMAIN_DRAM,
	[PERF_RAPL_PP1] = RAPL_DOMAIN_PP1,
	[PERF_RAPL_PSYS] = RAPL_DOMAIN_PLATFORM,
};
/*
 * pmu::event_init callback: validate the event config and bind it to a
 * RAPL package and domain.
 *
 * On success:
 *   event->pmu_private - the owning rapl_package
 *   event->hw.flags    - the RAPL domain id (also indexes data->scale[])
 *   event->hw.idx      - index into rp->domains[], or -1 when the package
 *                        lacks this domain (reads then return 0)
 */
static int rapl_pmu_event_init(struct perf_event *event)
{
	struct rapl_package *pos, *rp = NULL;
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int domain, idx;

	/* Only look at RAPL events */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Check for supported events only */
	if (!cfg || cfg >= PERF_RAPL_MAX)
		return -EINVAL;

	/* A concrete CPU is required to locate the package */
	if (event->cpu < 0)
		return -EINVAL;

	/* Find out which Package the event belongs to */
	list_for_each_entry(pos, &rapl_packages, plist) {
		if (is_rp_pmu_cpu(pos, event->cpu)) {
			rp = pos;
			break;
		}
	}
	if (!rp)
		return -ENODEV;

	/* Find out which RAPL Domain the event belongs to */
	domain = event_to_domain[cfg];

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
	event->pmu_private = rp; /* Which package */
	event->hw.flags = domain; /* Which domain */

	event->hw.idx = -1;
	/* Find out the index in rp->domains[] to get domain pointer */
	for (idx = 0; idx < rp->nr_domains; idx++) {
		if (rp->domains[idx].id == domain) {
			event->hw.idx = idx;
			break;
		}
	}

	return 0;
}
/* pmu::read callback: fold the latest hardware count into the event. */
static void rapl_pmu_event_read(struct perf_event *event)
{
	rapl_event_update(event);
}
/*
 * Periodic update callback: refresh every active event on the package so the
 * hardware counter cannot wrap unnoticed between reads.
 */
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_package_pmu_data *data =
		container_of(hrtimer, struct rapl_package_pmu_data, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	/* Unlocked fast path: no active events, let the timer die */
	if (!data->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&data->lock, flags);
	list_for_each_entry(event, &data->active_list, active_entry)
		rapl_event_update(event);
	raw_spin_unlock_irqrestore(&data->lock, flags);

	hrtimer_forward_now(hrtimer, data->timer_interval);

	return HRTIMER_RESTART;
}
/* PMU sysfs attributes */

/*
 * There are no default events, but we need to create "events" group (with
 * empty attrs) before updating it with detected events.
 */
static struct attribute *attrs_empty[] = {
	NULL,
};

/* Placeholder "events" group; per-domain groups are merged in via attr_update */
static struct attribute_group pmu_events_group = {
	.name = "events",
	.attrs = attrs_empty,
};
  1564. static ssize_t cpumask_show(struct device *dev,
  1565. struct device_attribute *attr, char *buf)
  1566. {
  1567. struct rapl_package *rp;
  1568. cpumask_var_t cpu_mask;
  1569. int cpu;
  1570. int ret;
  1571. if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
  1572. return -ENOMEM;
  1573. cpus_read_lock();
  1574. cpumask_clear(cpu_mask);
  1575. /* Choose a cpu for each RAPL Package */
  1576. list_for_each_entry(rp, &rapl_packages, plist) {
  1577. cpu = get_pmu_cpu(rp);
  1578. if (cpu < nr_cpu_ids)
  1579. cpumask_set_cpu(cpu, cpu_mask);
  1580. }
  1581. cpus_read_unlock();
  1582. ret = cpumap_print_to_pagebuf(true, buf, cpu_mask);
  1583. free_cpumask_var(cpu_mask);
  1584. return ret;
  1585. }
static DEVICE_ATTR_RO(cpumask);

/* "cpumask" attribute: advertises the per-package PMU CPUs to userspace */
static struct attribute *pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static struct attribute_group pmu_cpumask_group = {
	.attrs = pmu_cpumask_attrs,
};

/* Event encoding: event id lives in config bits 0-7 (matches RAPL_EVENT_MASK) */
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *pmu_format_attr[] = {
	&format_attr_event.attr,
	NULL
};

static struct attribute_group pmu_format_group = {
	.name = "format",
	.attrs = pmu_format_attr,
};

/* Static attribute groups supplied at PMU registration time */
static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_events_group,
	&pmu_cpumask_group,
	&pmu_format_group,
	NULL
};
/* Declare a read-only sysfs event attribute carrying a fixed string */
#define RAPL_EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {				\
	.attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL),		\
	.event_str = str,							\
}

/* Event ids (must match enum perf_rapl_events) */
RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu, rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");

/* All counters report energy in Joules */
RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_unit_cores, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit, rapl_unit_pkg, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit, rapl_unit_ram, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit, rapl_unit_gpu, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_unit_psys, "Joules");

/* Scale factor is 2^-32 Joules per count */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_scale_cores, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_scale_pkg, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_scale_ram, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_scale_gpu, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_scale_psys, "2.3283064365386962890625e-10");

/*
 * Per-domain "events" attribute group: name/unit/scale triple, visible only
 * when the registered PMU supports the corresponding RAPL domain
 * (rapl_pmu.domain_map).
 */
#define RAPL_EVENT_GROUP(_name, domain)						\
static struct attribute *pmu_attr_##_name[] = {					\
	&event_attr_rapl_##_name.attr.attr,					\
	&event_attr_rapl_unit_##_name.attr.attr,				\
	&event_attr_rapl_scale_##_name.attr.attr,				\
	NULL									\
};										\
static umode_t is_visible_##_name(struct kobject *kobj, struct attribute *attr, int event)	\
{										\
	return rapl_pmu.domain_map & BIT(domain) ? attr->mode : 0;		\
}										\
static struct attribute_group pmu_group_##_name = {				\
	.name = "events",							\
	.attrs = pmu_attr_##_name,						\
	.is_visible = is_visible_##_name,					\
}

RAPL_EVENT_GROUP(cores, RAPL_DOMAIN_PP0);
RAPL_EVENT_GROUP(pkg, RAPL_DOMAIN_PACKAGE);
RAPL_EVENT_GROUP(ram, RAPL_DOMAIN_DRAM);
RAPL_EVENT_GROUP(gpu, RAPL_DOMAIN_PP1);
RAPL_EVENT_GROUP(psys, RAPL_DOMAIN_PLATFORM);

/* Dynamic groups merged via pmu.attr_update so visibility tracks domain_map */
static const struct attribute_group *pmu_attr_update[] = {
	&pmu_group_cores,
	&pmu_group_pkg,
	&pmu_group_ram,
	&pmu_group_gpu,
	&pmu_group_psys,
	NULL
};
/*
 * (Re)register the system-wide "power" PMU so that it covers every domain
 * present in @rp. If the current registration already covers them all, just
 * mark the package as PMU-backed.
 *
 * The PMU is torn down and re-registered because domain_map (which controls
 * sysfs event visibility) can only be extended by re-registering.
 */
static int rapl_pmu_update(struct rapl_package *rp)
{
	int ret = 0;

	/* Return if PMU already covers all events supported by current RAPL Package */
	if (rapl_pmu.registered && !(rp->domain_map & (~rapl_pmu.domain_map)))
		goto end;

	/* Unregister previous registered PMU */
	if (rapl_pmu.registered)
		perf_pmu_unregister(&rapl_pmu.pmu);

	rapl_pmu.registered = false;

	/* Accumulate the domains of every package seen so far */
	rapl_pmu.domain_map |= rp->domain_map;

	memset(&rapl_pmu.pmu, 0, sizeof(struct pmu));
	rapl_pmu.pmu.attr_groups = pmu_attr_groups;
	rapl_pmu.pmu.attr_update = pmu_attr_update;
	rapl_pmu.pmu.task_ctx_nr = perf_invalid_context;
	rapl_pmu.pmu.event_init = rapl_pmu_event_init;
	rapl_pmu.pmu.add = rapl_pmu_event_add;
	rapl_pmu.pmu.del = rapl_pmu_event_del;
	rapl_pmu.pmu.start = rapl_pmu_event_start;
	rapl_pmu.pmu.stop = rapl_pmu_event_stop;
	rapl_pmu.pmu.read = rapl_pmu_event_read;
	rapl_pmu.pmu.module = THIS_MODULE;
	rapl_pmu.pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT;
	ret = perf_pmu_register(&rapl_pmu.pmu, "power", -1);
	if (ret) {
		pr_info("Failed to register PMU\n");
		return ret;
	}

	rapl_pmu.registered = true;
end:
	rp->has_pmu = true;
	return ret;
}
/*
 * Enable PMU support for @rp: compute the per-domain scale factors, size the
 * counter-overflow timer (once, system wide), initialize the per-package PMU
 * data and (re)register the system-wide PMU.
 *
 * Returns -EEXIST when the package already has PMU support, otherwise the
 * result of rapl_pmu_update().
 */
int rapl_package_add_pmu(struct rapl_package *rp)
{
	struct rapl_package_pmu_data *data = &rp->pmu_data;
	int idx;

	if (rp->has_pmu)
		return -EEXIST;

	guard(cpus_read_lock)();

	for (idx = 0; idx < rp->nr_domains; idx++) {
		struct rapl_domain *rd = &rp->domains[idx];
		int domain = rd->id;
		u64 val;

		if (!test_bit(domain, &rp->domain_map))
			continue;

		/*
		 * The RAPL PMU granularity is 2^-32 Joules
		 * data->scale[]: times of 2^-32 Joules for each ENERGY COUNTER increase
		 */
		val = rd->energy_unit * (1ULL << 32);
		do_div(val, ENERGY_UNIT_SCALE * 1000000);
		data->scale[domain] = val;

		/* The timer period is computed once from the first domain seen */
		if (!rapl_pmu.timer_ms) {
			struct rapl_primitive_info *rpi = get_rpi(rp, ENERGY_COUNTER);

			/*
			 * Calculate the timer rate:
			 * Use reference of 200W for scaling the timeout to avoid counter
			 * overflows.
			 *
			 * max_count = rpi->mask >> rpi->shift + 1
			 * max_energy_pj = max_count * rd->energy_unit
			 * max_time_sec = (max_energy_pj / 1000000000) / 200w
			 *
			 * rapl_pmu.timer_ms = max_time_sec * 1000 / 2
			 */
			val = (rpi->mask >> rpi->shift) + 1;
			val *= rd->energy_unit;
			do_div(val, 1000000 * 200 * 2);
			rapl_pmu.timer_ms = val;
			pr_debug("%llu ms overflow timer\n", rapl_pmu.timer_ms);
		}

		pr_debug("Domain %s: hw unit %lld * 2^-32 Joules\n", rd->name, data->scale[domain]);
	}

	/* Initialize per package PMU data */
	raw_spin_lock_init(&data->lock);
	INIT_LIST_HEAD(&data->active_list);
	data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms);
	hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	data->hrtimer.function = rapl_hrtimer_handle;

	return rapl_pmu_update(rp);
}
EXPORT_SYMBOL_GPL(rapl_package_add_pmu);
/*
 * Drop @rp's PMU participation; the global PMU is unregistered and reset only
 * when no other registered package still needs it.
 */
void rapl_package_remove_pmu(struct rapl_package *rp)
{
	struct rapl_package *pos;

	if (!rp->has_pmu)
		return;

	guard(cpus_read_lock)();

	list_for_each_entry(pos, &rapl_packages, plist) {
		/* PMU is still needed */
		if (pos->has_pmu && pos != rp)
			return;
	}

	perf_pmu_unregister(&rapl_pmu.pmu);
	memset(&rapl_pmu, 0, sizeof(struct rapl_pmu));
}
EXPORT_SYMBOL_GPL(rapl_package_remove_pmu);
  1756. #endif
/* called from CPU hotplug notifier, hotplug lock held */
void rapl_remove_package_cpuslocked(struct rapl_package *rp)
{
	struct rapl_domain *rd, *rd_package = NULL;

	package_power_limit_irq_restore(rp);

	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
		int i;

		/* Disable and unclamp every power limit before unregistering */
		for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
			rapl_write_pl_data(rd, i, PL_ENABLE, 0);
			rapl_write_pl_data(rd, i, PL_CLAMP, 0);
		}

		/* Defer the package domain: it is the parent powercap zone */
		if (rd->id == RAPL_DOMAIN_PACKAGE) {
			rd_package = rd;
			continue;
		}

		pr_debug("remove package, undo power limit on %s: %s\n",
			 rp->name, rd->name);
		powercap_unregister_zone(rp->priv->control_type,
					 &rd->power_zone);
	}

	/*
	 * do parent zone last
	 * NOTE(review): assumes every package has a RAPL_DOMAIN_PACKAGE domain,
	 * otherwise rd_package is NULL here — confirm against rapl_detect_domains()
	 */
	powercap_unregister_zone(rp->priv->control_type,
				 &rd_package->power_zone);

	list_del(&rp->plist);
	kfree(rp);
}
EXPORT_SYMBOL_GPL(rapl_remove_package_cpuslocked);
/* Wrapper taking the CPU hotplug read lock around package removal. */
void rapl_remove_package(struct rapl_package *rp)
{
	guard(cpus_read_lock)();
	rapl_remove_package_cpuslocked(rp);
}
EXPORT_SYMBOL_GPL(rapl_remove_package);
/*
 * RAPL Package energy counter scope:
 * 1. AMD/HYGON platforms use a per-PKG package energy counter.
 * 2. For Intel platforms:
 *    2.1 The CLX-AP platform has a per-DIE package energy counter.
 *    2.2 Other platforms that use MSR RAPL are single-die systems, so the
 *        package energy counter can be considered per-PKG/per-DIE;
 *        here it is treated as per-DIE.
 *    2.3 Newer platforms that use TPMI RAPL don't care about the
 *        scope because they are not MSR/CPU based.
 */
/* True when the package energy counter is per-PKG (AMD/HYGON), see above */
#define rapl_msrs_are_pkg_scope()				\
	(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||	\
	 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
  1804. /* caller to ensure CPU hotplug lock is held */
  1805. struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
  1806. bool id_is_cpu)
  1807. {
  1808. struct rapl_package *rp;
  1809. int uid;
  1810. if (id_is_cpu) {
  1811. uid = rapl_msrs_are_pkg_scope() ?
  1812. topology_physical_package_id(id) : topology_logical_die_id(id);
  1813. if (uid < 0) {
  1814. pr_err("topology_logical_(package/die)_id() returned a negative value");
  1815. return NULL;
  1816. }
  1817. }
  1818. else
  1819. uid = id;
  1820. list_for_each_entry(rp, &rapl_packages, plist) {
  1821. if (rp->id == uid
  1822. && rp->priv->control_type == priv->control_type)
  1823. return rp;
  1824. }
  1825. return NULL;
  1826. }
  1827. EXPORT_SYMBOL_GPL(rapl_find_package_domain_cpuslocked);
/* Locked wrapper around rapl_find_package_domain_cpuslocked(). */
struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
{
	guard(cpus_read_lock)();
	return rapl_find_package_domain_cpuslocked(id, priv, id_is_cpu);
}
EXPORT_SYMBOL_GPL(rapl_find_package_domain);
  1834. /* called from CPU hotplug notifier, hotplug lock held */
  1835. struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv, bool id_is_cpu)
  1836. {
  1837. struct rapl_package *rp;
  1838. int ret;
  1839. rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
  1840. if (!rp)
  1841. return ERR_PTR(-ENOMEM);
  1842. if (id_is_cpu) {
  1843. rp->id = rapl_msrs_are_pkg_scope() ?
  1844. topology_physical_package_id(id) : topology_logical_die_id(id);
  1845. if ((int)(rp->id) < 0) {
  1846. pr_err("topology_logical_(package/die)_id() returned a negative value");
  1847. return ERR_PTR(-EINVAL);
  1848. }
  1849. rp->lead_cpu = id;
  1850. if (!rapl_msrs_are_pkg_scope() && topology_max_dies_per_package() > 1)
  1851. snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d",
  1852. topology_physical_package_id(id), topology_die_id(id));
  1853. else
  1854. snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
  1855. topology_physical_package_id(id));
  1856. } else {
  1857. rp->id = id;
  1858. rp->lead_cpu = -1;
  1859. snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", id);
  1860. }
  1861. rp->priv = priv;
  1862. ret = rapl_config(rp);
  1863. if (ret)
  1864. goto err_free_package;
  1865. /* check if the package contains valid domains */
  1866. if (rapl_detect_domains(rp)) {
  1867. ret = -ENODEV;
  1868. goto err_free_package;
  1869. }
  1870. ret = rapl_package_register_powercap(rp);
  1871. if (!ret) {
  1872. INIT_LIST_HEAD(&rp->plist);
  1873. list_add(&rp->plist, &rapl_packages);
  1874. return rp;
  1875. }
  1876. err_free_package:
  1877. kfree(rp->domains);
  1878. kfree(rp);
  1879. return ERR_PTR(ret);
  1880. }
  1881. EXPORT_SYMBOL_GPL(rapl_add_package_cpuslocked);
/* Locked wrapper around rapl_add_package_cpuslocked(). */
struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
{
	guard(cpus_read_lock)();
	return rapl_add_package_cpuslocked(id, priv, id_is_cpu);
}
EXPORT_SYMBOL_GPL(rapl_add_package);
/*
 * Snapshot each package's programmed power limits into rpl[].last_power_limit
 * ahead of suspend so power_limit_state_restore() can re-apply them.
 */
static void power_limit_state_save(void)
{
	struct rapl_package *rp;
	struct rapl_domain *rd;
	int ret, i;

	cpus_read_lock();
	list_for_each_entry(rp, &rapl_packages, plist) {
		if (!rp->power_zone)
			continue;
		rd = power_zone_to_rapl_domain(rp->power_zone);
		for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
			ret = rapl_read_pl_data(rd, i, PL_LIMIT, true,
						&rd->rpl[i].last_power_limit);
			/* 0 doubles as "nothing to restore" on read failure */
			if (ret)
				rd->rpl[i].last_power_limit = 0;
		}
	}
	cpus_read_unlock();
}
/*
 * Re-apply the power limits saved by power_limit_state_save() after resume.
 * A saved value of 0 means "not saved / read failed" and is skipped.
 */
static void power_limit_state_restore(void)
{
	struct rapl_package *rp;
	struct rapl_domain *rd;
	int i;

	cpus_read_lock();
	list_for_each_entry(rp, &rapl_packages, plist) {
		if (!rp->power_zone)
			continue;
		rd = power_zone_to_rapl_domain(rp->power_zone);
		for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++)
			if (rd->rpl[i].last_power_limit)
				rapl_write_pl_data(rd, i, PL_LIMIT,
						   rd->rpl[i].last_power_limit);
	}
	cpus_read_unlock();
}
  1924. static int rapl_pm_callback(struct notifier_block *nb,
  1925. unsigned long mode, void *_unused)
  1926. {
  1927. switch (mode) {
  1928. case PM_SUSPEND_PREPARE:
  1929. power_limit_state_save();
  1930. break;
  1931. case PM_POST_SUSPEND:
  1932. power_limit_state_restore();
  1933. break;
  1934. }
  1935. return NOTIFY_OK;
  1936. }
static struct notifier_block rapl_pm_notifier = {
	.notifier_call = rapl_pm_callback,
};

/* MSR interface platform device, created in rapl_init() on matching CPUs */
static struct platform_device *rapl_msr_platdev;
/*
 * Module init: spawn the intel_rapl_msr platform device on CPUs listed in
 * rapl_ids, then register the suspend/resume notifier. The platform device
 * is torn down again if notifier registration fails.
 */
static int __init rapl_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	id = x86_match_cpu(rapl_ids);
	if (id) {
		/* MSR RAPL supported: pick up defaults and spawn the MSR driver */
		defaults_msr = (struct rapl_defaults *)id->driver_data;

		rapl_msr_platdev = platform_device_alloc("intel_rapl_msr", 0);
		if (!rapl_msr_platdev)
			return -ENOMEM;

		ret = platform_device_add(rapl_msr_platdev);
		if (ret) {
			platform_device_put(rapl_msr_platdev);
			return ret;
		}
	}

	ret = register_pm_notifier(&rapl_pm_notifier);
	/* Unwind the platform device if the notifier failed to register */
	if (ret && rapl_msr_platdev) {
		platform_device_del(rapl_msr_platdev);
		platform_device_put(rapl_msr_platdev);
	}

	return ret;
}
/* Module exit: undo rapl_init() registrations. */
static void __exit rapl_exit(void)
{
	/* platform_device_unregister(NULL) is safe when the device was never created */
	platform_device_unregister(rapl_msr_platdev);
	unregister_pm_notifier(&rapl_pm_notifier);
}
/* fs_initcall: NOTE(review): presumably so this core is ready before interface drivers probe — confirm */
fs_initcall(rapl_init);
module_exit(rapl_exit);

MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
MODULE_LICENSE("GPL v2");