intel_idle.c 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * intel_idle.c - native hardware idle loop for modern Intel processors
  4. *
  5. * Copyright (c) 2013 - 2020, Intel Corporation.
  6. * Len Brown <len.brown@intel.com>
  7. * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  8. */
  9. /*
  10. * intel_idle is a cpuidle driver that loads on all Intel CPUs with MWAIT
  11. * in lieu of the legacy ACPI processor_idle driver. The intent is to
  12. * make Linux more efficient on these processors, as intel_idle knows
  13. * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
  14. */
  15. /*
  16. * Design Assumptions
  17. *
  18. * All CPUs have same idle states as boot CPU
  19. *
  20. * Chipset BM_STS (bus master status) bit is a NOP
  21. * for preventing entry into deep C-states
  22. *
  23. * CPU will flush caches as needed when entering a C-state via MWAIT
  24. * (in contrast to entering ACPI C3, in which case the WBINVD
  25. * instruction needs to be executed to flush the caches)
  26. */
  27. /*
  28. * Known limitations
  29. *
  30. * ACPI has a .suspend hack to turn off deep c-states during suspend
  31. * to avoid complications with the lapic timer workaround.
  32. * Have not seen issues with suspend, but may need same workaround here.
  33. *
  34. */
  35. /* un-comment DEBUG to enable pr_debug() statements */
  36. /* #define DEBUG */
  37. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  38. #include <linux/acpi.h>
  39. #include <linux/kernel.h>
  40. #include <linux/cpuidle.h>
  41. #include <linux/tick.h>
  42. #include <trace/events/power.h>
  43. #include <linux/sched.h>
  44. #include <linux/sched/smt.h>
  45. #include <linux/notifier.h>
  46. #include <linux/cpu.h>
  47. #include <linux/moduleparam.h>
  48. #include <asm/cpu_device_id.h>
  49. #include <asm/intel-family.h>
  50. #include <asm/mwait.h>
  51. #include <asm/spec-ctrl.h>
  52. #include <asm/tsc.h>
  53. #include <asm/fpu/api.h>
  54. #define INTEL_IDLE_VERSION "0.5.1"
/* The cpuidle driver object registered with the cpuidle core. */
static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
/* Bitmask of state indices disabled via module parameter. */
static unsigned int disabled_states_mask __read_mostly;
/* Bitmask of states preferred over the defaults via module parameter. */
static unsigned int preferred_states_mask __read_mostly;
/* Force interrupts enabled during C-state entry (see CPUIDLE_FLAG_IRQ_ENABLE). */
static bool force_irq_on __read_mostly;
/* Allow IBRS to stay off across idle even when KERNEL_IBRS is in use. */
static bool ibrs_off __read_mostly;

/* Per-CPU cpuidle devices allocated at driver init. */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;

/* MSR enable bits to clear when auto-demotion is to be disabled. */
static unsigned long auto_demotion_disable_flags;

/* Requested handling of the C1E promotion hardware feature. */
static enum {
	C1E_PROMOTION_PRESERVE,
	C1E_PROMOTION_ENABLE,
	C1E_PROMOTION_DISABLE
} c1e_promotion = C1E_PROMOTION_PRESERVE;
/* Per-CPU-model idle configuration selected by CPU-ID match at init. */
struct idle_cpu {
	/* Custom C-state table for this CPU model. */
	struct cpuidle_state *state_table;

	/*
	 * Hardware C-state auto-demotion may not always be optimal.
	 * Indicate which enable bits to clear here.
	 */
	unsigned long auto_demotion_disable_flags;
	/* Baytrail-specific auto-demotion disable handling. */
	bool byt_auto_demotion_disable_flag;
	/* Disable the hardware promotion of C1 requests to C1E. */
	bool disable_promotion_to_c1e;
	/* Consult the ACPI _CST in addition to the custom table. */
	bool use_acpi;
};
/* Matched idle_cpu entry for the boot CPU; only used during init. */
static const struct idle_cpu *icpu __initdata;
/* C-state table actually being installed; only used during init. */
static struct cpuidle_state *cpuidle_state_table __initdata;
/* CPUID MWAIT sub-state enumeration (EDX); only used during init. */
static unsigned int mwait_substates __initdata;

/*
 * Enable interrupts before entering the C-state. On some platforms and for
 * some C-states, this may measurably decrease interrupt latency.
 */
#define CPUIDLE_FLAG_IRQ_ENABLE BIT(14)

/*
 * Enable this state by default even if the ACPI _CST does not list it.
 */
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)

/*
 * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE
 * above.
 */
#define CPUIDLE_FLAG_IBRS BIT(16)

/*
 * Initialize large xstate for the C6-state entrance.
 */
#define CPUIDLE_FLAG_INIT_XSTATE BIT(17)

/*
 * Ignore the sub-state when matching mwait hints between the ACPI _CST and
 * custom tables.
 */
#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH BIT(18)

/*
 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
 * the C-state (top nibble) and sub-state (bottom nibble)
 * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
 *
 * We store the hint at the top of our "flags" for each state.
 */
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
  118. static __always_inline int __intel_idle(struct cpuidle_device *dev,
  119. struct cpuidle_driver *drv,
  120. int index, bool irqoff)
  121. {
  122. struct cpuidle_state *state = &drv->states[index];
  123. unsigned long eax = flg2MWAIT(state->flags);
  124. unsigned long ecx = 1*irqoff; /* break on interrupt flag */
  125. mwait_idle_with_hints(eax, ecx);
  126. return index;
  127. }
  128. /**
  129. * intel_idle - Ask the processor to enter the given idle state.
  130. * @dev: cpuidle device of the target CPU.
  131. * @drv: cpuidle driver (assumed to point to intel_idle_driver).
  132. * @index: Target idle state index.
  133. *
  134. * Use the MWAIT instruction to notify the processor that the CPU represented by
  135. * @dev is idle and it can try to enter the idle state corresponding to @index.
  136. *
  137. * If the local APIC timer is not known to be reliable in the target idle state,
  138. * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
  139. *
  140. * Must be called under local_irq_disable().
  141. */
  142. static __cpuidle int intel_idle(struct cpuidle_device *dev,
  143. struct cpuidle_driver *drv, int index)
  144. {
  145. return __intel_idle(dev, drv, index, true);
  146. }
  147. static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
  148. struct cpuidle_driver *drv, int index)
  149. {
  150. return __intel_idle(dev, drv, index, false);
  151. }
  152. static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
  153. struct cpuidle_driver *drv, int index)
  154. {
  155. bool smt_active = sched_smt_active();
  156. u64 spec_ctrl = spec_ctrl_current();
  157. int ret;
  158. if (smt_active)
  159. __update_spec_ctrl(0);
  160. ret = __intel_idle(dev, drv, index, true);
  161. if (smt_active)
  162. __update_spec_ctrl(spec_ctrl);
  163. return ret;
  164. }
  165. static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
  166. struct cpuidle_driver *drv, int index)
  167. {
  168. fpu_idle_fpregs();
  169. return __intel_idle(dev, drv, index, true);
  170. }
  171. /**
  172. * intel_idle_s2idle - Ask the processor to enter the given idle state.
  173. * @dev: cpuidle device of the target CPU.
  174. * @drv: cpuidle driver (assumed to point to intel_idle_driver).
  175. * @index: Target idle state index.
  176. *
  177. * Use the MWAIT instruction to notify the processor that the CPU represented by
  178. * @dev is idle and it can try to enter the idle state corresponding to @index.
  179. *
  180. * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
  181. * scheduler tick and suspended scheduler clock on the target CPU.
  182. */
  183. static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
  184. struct cpuidle_driver *drv, int index)
  185. {
  186. unsigned long ecx = 1; /* break on interrupt flag */
  187. struct cpuidle_state *state = &drv->states[index];
  188. unsigned long eax = flg2MWAIT(state->flags);
  189. if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
  190. fpu_idle_fpregs();
  191. mwait_idle_with_hints(eax, ecx);
  192. return 0;
  193. }
/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 */

/* C-state table for Nehalem-generation CPUs. */
static struct cpuidle_state nehalem_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* C-state table for Sandy Bridge CPUs. */
static struct cpuidle_state snb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 211,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* C-state table for Bay Trail (Atom Silvermont) CPUs. */
static struct cpuidle_state byt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 500,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* C-state table for Cherry Trail (Atom Airmont) CPUs. */
static struct cpuidle_state cht_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* C-state table for Ivy Bridge client CPUs. */
static struct cpuidle_state ivb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 87,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* C-state table for Ivy Town (Ivy Bridge-EP) CPUs. */
static struct cpuidle_state ivt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 82,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* Ivy Town table variant with residencies tuned for 4-socket systems. */
static struct cpuidle_state ivt_cstates_4s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 250,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 84,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* Ivy Town table variant with residencies tuned for 8-socket systems. */
static struct cpuidle_state ivt_cstates_8s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 88,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* C-state table for Haswell CPUs. */
static struct cpuidle_state hsw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 33,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* C-state table for Broadwell CPUs. */
static struct cpuidle_state bdw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 40,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/* C-state table for Skylake client CPUs; deep states also toggle IBRS. */
static struct cpuidle_state skl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 70,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 85,
		.target_residency = 200,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x33",
		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 124,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 480,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 890,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* .enter == NULL terminates the table. */
		.enter = NULL }
};
/*
 * Sky Lake-X (server) idle states (bound via idle_cpu_skx).  C1 keeps
 * interrupts enabled (CPUIDLE_FLAG_IRQ_ENABLE); C6 carries CPUIDLE_FLAG_IBRS.
 */
static struct cpuidle_state skx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 133,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Ice Lake-X / Ice Lake-D idle states (bound via idle_cpu_icx in
 * intel_idle_ids).  C1 keeps interrupts enabled.
 */
static struct cpuidle_state icx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
  779. /*
  780. * On AlderLake C1 has to be disabled if C1E is enabled, and vice versa.
  781. * C1E is enabled only if "C1E promotion" bit is set in MSR_IA32_POWER_CTL.
  782. * But in this case there is effectively no C1, because C1 requests are
  783. * promoted to C1E. If the "C1E promotion" bit is cleared, then both C1
  784. * and C1E requests end up with C1, so there is effectively no C1E.
  785. *
  786. * By default we enable C1E and disable C1 by marking it with
  787. * 'CPUIDLE_FLAG_UNUSABLE'.
  788. */
/*
 * Alder Lake (desktop) idle states (bound via idle_cpu_adl).  C1 starts out
 * CPUIDLE_FLAG_UNUSABLE because C1E promotion is enabled by default -- see
 * the comment block just above this table.
 */
static struct cpuidle_state adl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 220,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 280,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 680,
		.target_residency = 2000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Alder Lake-L (mobile) idle states (bound via idle_cpu_adl_l).  Same
 * structure as adl_cstates with different latency/residency numbers; C1 is
 * unusable by default because of C1E promotion (see comment above
 * adl_cstates).
 */
static struct cpuidle_state adl_l_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 230,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Meteor Lake-L idle states (bound via idle_cpu_mtl_l).  Note there is no
 * separate C1 entry here: C1E is the shallowest state exposed.
 */
static struct cpuidle_state mtl_l_cstates[] __initdata = {
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 420,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 310,
		.target_residency = 930,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Gracemont (INTEL_ATOM_GRACEMONT) idle states, bound via idle_cpu_gmt.
 * C1 starts out unusable in favor of C1E, like on Alder Lake (see the
 * comment above adl_cstates).
 */
static struct cpuidle_state gmt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 195,
		.target_residency = 585,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 260,
		.target_residency = 1040,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 660,
		.target_residency = 1980,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Sapphire Rapids / Emerald Rapids idle states (bound via idle_cpu_spr).
 * C6 requires the XSTATE to be reinitialized on exit
 * (CPUIDLE_FLAG_INIT_XSTATE).
 */
static struct cpuidle_state spr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE,
		.exit_latency = 290,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Granite Rapids idle states (bound via idle_cpu_gnr).  C6 and C6P allow a
 * partial MWAIT hint match (CPUIDLE_FLAG_PARTIAL_HINT_MATCH): only the major
 * C-state field is compared against ACPI _CST data, ignoring sub-state bits.
 */
static struct cpuidle_state gnr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 170,
		.target_residency = 650,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6P",
		.desc = "MWAIT 0x21",
		.flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 210,
		.target_residency = 1000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Classic Atom (Bonnell/Saltwell, and Lincroft via idle_cpu_lincroft) idle
 * states.  Note the shallowest state is named "C1E" but uses MWAIT hint 0x00.
 */
static struct cpuidle_state atom_cstates[] __initdata = {
	{
		.name = "C1E",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C2",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10),
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Tangier (INTEL_ATOM_SILVERMONT_MID) idle states, bound via
 * idle_cpu_tangier.
 */
static struct cpuidle_state tangier_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Avoton/Rangeley (INTEL_ATOM_SILVERMONT_D) idle states, bound via
 * idle_cpu_avn.
 */
static struct cpuidle_state avn_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x51",
		.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 15,
		.target_residency = 45,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Knights Landing / Knights Mill (Xeon Phi) idle states, bound via
 * idle_cpu_knl.
 */
static struct cpuidle_state knl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle },
	{
		.name = "C6",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 120,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Broxton (INTEL_ATOM_GOLDMONT and GOLDMONT_PLUS) idle states, bound via
 * idle_cpu_bxt.
 */
static struct cpuidle_state bxt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 133,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x31",
		.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 155,
		.target_residency = 155,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1000,
		.target_residency = 1000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2000,
		.target_residency = 2000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 10000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Denverton (INTEL_ATOM_GOLDMONT_D) idle states, bound via idle_cpu_dnv.
 */
static struct cpuidle_state dnv_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 50,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
  1226. /*
  1227. * Note, depending on HW and FW revision, SnowRidge SoC may or may not support
  1228. * C6, and this is indicated in the CPUID mwait leaf.
  1229. */
/*
 * SnowRidge (INTEL_ATOM_TREMONT_D) idle states, bound via idle_cpu_snr.
 * Whether C6 is actually available depends on HW/FW -- see the comment
 * directly above this table.
 */
static struct cpuidle_state snr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 15,
		.target_residency = 25,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 130,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * INTEL_ATOM_CRESTMONT idle states, bound via idle_cpu_grr.  Both C1 and
 * C1E are always enabled; the deepest state is module-level C6S (hint 0x22).
 */
static struct cpuidle_state grr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 10,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x22",
		.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * INTEL_ATOM_CRESTMONT_X idle states, bound via idle_cpu_srf.  C6S and C6SP
 * allow a partial MWAIT hint match against ACPI _CST data (only the major
 * C-state field is compared, not the sub-state bits).
 */
static struct cpuidle_state srf_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 10,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x22",
		.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 270,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6SP",
		.desc = "MWAIT 0x23",
		.flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 310,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		/* Table terminator. */
		.enter = NULL }
};
/*
 * Per-model driver configuration.  Each idle_cpu instance points at one of
 * the static C-state tables above and carries model-specific quirks:
 * auto-demotion flags to clear, whether to disable C1E promotion, and
 * whether ACPI _CST may be consulted (use_acpi).  "x"-suffixed variants
 * (nhx, snx, hsx, ...) are the server flavors, which additionally set
 * use_acpi.
 */
static const struct idle_cpu idle_cpu_nehalem __initconst = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_nhx __initconst = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_atom __initconst = {
	.state_table = atom_cstates,
};

static const struct idle_cpu idle_cpu_tangier __initconst = {
	.state_table = tangier_cstates,
};

static const struct idle_cpu idle_cpu_lincroft __initconst = {
	.state_table = atom_cstates,
	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_snb __initconst = {
	.state_table = snb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_snx __initconst = {
	.state_table = snb_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_byt __initconst = {
	.state_table = byt_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_cht __initconst = {
	.state_table = cht_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_ivb __initconst = {
	.state_table = ivb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_ivt __initconst = {
	.state_table = ivt_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_hsw __initconst = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_hsx __initconst = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bdw __initconst = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_bdx __initconst = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_skl __initconst = {
	.state_table = skl_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_skx __initconst = {
	.state_table = skx_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_icx __initconst = {
	.state_table = icx_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_adl __initconst = {
	.state_table = adl_cstates,
};

static const struct idle_cpu idle_cpu_adl_l __initconst = {
	.state_table = adl_l_cstates,
};

static const struct idle_cpu idle_cpu_mtl_l __initconst = {
	.state_table = mtl_l_cstates,
};

static const struct idle_cpu idle_cpu_gmt __initconst = {
	.state_table = gmt_cstates,
};

static const struct idle_cpu idle_cpu_spr __initconst = {
	.state_table = spr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_gnr __initconst = {
	.state_table = gnr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_avn __initconst = {
	.state_table = avn_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_knl __initconst = {
	.state_table = knl_cstates,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bxt __initconst = {
	.state_table = bxt_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_dnv __initconst = {
	.state_table = dnv_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

/*
 * NOTE(review): no .state_table here -- presumably Tremont idle states are
 * built from ACPI _CST or CPUID elsewhere in this driver; verify at the
 * init-time consumer of this structure.
 */
static const struct idle_cpu idle_cpu_tmt __initconst = {
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_snr __initconst = {
	.state_table = snr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_grr __initconst = {
	.state_table = grr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_srf __initconst = {
	.state_table = srf_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};
/*
 * CPU match table: maps each supported Intel vendor/family/model to the
 * idle_cpu configuration (and thus the C-state table) defined above.
 * Terminated by the empty entry.
 */
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
	X86_MATCH_VFM(INTEL_NEHALEM_EP, &idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_NEHALEM, &idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_NEHALEM_G, &idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_WESTMERE, &idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_WESTMERE_EP, &idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_NEHALEM_EX, &idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_ATOM_BONNELL, &idle_cpu_atom),
	X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID, &idle_cpu_lincroft),
	X86_MATCH_VFM(INTEL_WESTMERE_EX, &idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE, &idle_cpu_snb),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &idle_cpu_snx),
	X86_MATCH_VFM(INTEL_ATOM_SALTWELL, &idle_cpu_atom),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &idle_cpu_byt),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &idle_cpu_tangier),
	X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &idle_cpu_cht),
	X86_MATCH_VFM(INTEL_IVYBRIDGE, &idle_cpu_ivb),
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &idle_cpu_ivt),
	X86_MATCH_VFM(INTEL_HASWELL, &idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_HASWELL_X, &idle_cpu_hsx),
	X86_MATCH_VFM(INTEL_HASWELL_L, &idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_HASWELL_G, &idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &idle_cpu_avn),
	X86_MATCH_VFM(INTEL_BROADWELL, &idle_cpu_bdw),
	X86_MATCH_VFM(INTEL_BROADWELL_G, &idle_cpu_bdw),
	X86_MATCH_VFM(INTEL_BROADWELL_X, &idle_cpu_bdx),
	X86_MATCH_VFM(INTEL_BROADWELL_D, &idle_cpu_bdx),
	X86_MATCH_VFM(INTEL_SKYLAKE_L, &idle_cpu_skl),
	X86_MATCH_VFM(INTEL_SKYLAKE, &idle_cpu_skl),
	X86_MATCH_VFM(INTEL_KABYLAKE_L, &idle_cpu_skl),
	X86_MATCH_VFM(INTEL_KABYLAKE, &idle_cpu_skl),
	X86_MATCH_VFM(INTEL_SKYLAKE_X, &idle_cpu_skx),
	X86_MATCH_VFM(INTEL_ICELAKE_X, &idle_cpu_icx),
	X86_MATCH_VFM(INTEL_ICELAKE_D, &idle_cpu_icx),
	X86_MATCH_VFM(INTEL_ALDERLAKE, &idle_cpu_adl),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L, &idle_cpu_adl_l),
	X86_MATCH_VFM(INTEL_METEORLAKE_L, &idle_cpu_mtl_l),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &idle_cpu_gmt),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &idle_cpu_gnr),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &idle_cpu_dnv),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT, &idle_cpu_tmt),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &idle_cpu_tmt),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
	{}
};
/*
 * Fallback match: any Intel CPU of any family that advertises MWAIT, with
 * no driver data attached.
 */
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL),
	{}
};
  1520. static bool __init intel_idle_max_cstate_reached(int cstate)
  1521. {
  1522. if (cstate + 1 > max_cstate) {
  1523. pr_info("max_cstate %d reached\n", max_cstate);
  1524. return true;
  1525. }
  1526. return false;
  1527. }
  1528. static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
  1529. {
  1530. unsigned long eax = flg2MWAIT(state->flags);
  1531. if (boot_cpu_has(X86_FEATURE_ARAT))
  1532. return false;
  1533. /*
  1534. * Switch over to one-shot tick broadcast if the target C-state
  1535. * is deeper than C1.
  1536. */
  1537. return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
  1538. }
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
#include <acpi/processor.h>

/* Module parameter: when set, ACPI _CST is never consulted. */
static bool no_acpi __read_mostly;
module_param(no_acpi, bool, 0444);
MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");

/* Module parameter: force the use of ACPI _CST data. */
static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
module_param_named(use_acpi, force_use_acpi, bool, 0444);
MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");

/*
 * _CST data extracted from one CPU by intel_idle_acpi_cst_extract() and
 * then used for all of them.
 */
static struct acpi_processor_power acpi_state_table __initdata;
  1548. /**
  1549. * intel_idle_cst_usable - Check if the _CST information can be used.
  1550. *
  1551. * Check if all of the C-states listed by _CST in the max_cstate range are
  1552. * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
  1553. */
  1554. static bool __init intel_idle_cst_usable(void)
  1555. {
  1556. int cstate, limit;
  1557. limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
  1558. acpi_state_table.count);
  1559. for (cstate = 1; cstate < limit; cstate++) {
  1560. struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
  1561. if (cx->entry_method != ACPI_CSTATE_FFH)
  1562. return false;
  1563. }
  1564. return true;
  1565. }
/*
 * intel_idle_acpi_cst_extract - Populate acpi_state_table from ACPI _CST.
 *
 * Walk the possible CPUs until one yields a _CST package that is usable
 * (all in-range states use FFH/MWAIT) and for which _CST control can be
 * claimed.  On success, leave the extracted data in acpi_state_table and
 * return 'true'; otherwise reset the table and return 'false'.
 */
static bool __init intel_idle_acpi_cst_extract(void)
{
	unsigned int cpu;

	if (no_acpi) {
		pr_debug("Not allowed to use ACPI _CST\n");
		return false;
	}

	for_each_possible_cpu(cpu) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (!pr)
			continue;

		if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
			continue;

		/*
		 * Bump the count so index-from-1 consumers (see
		 * intel_idle_cst_usable()) see all states; presumably this
		 * accounts for slot 0 being unused -- verify against
		 * acpi_processor_evaluate_cst().
		 */
		acpi_state_table.count++;

		if (!intel_idle_cst_usable())
			continue;

		/* Give up entirely if the kernel cannot take over _CST. */
		if (!acpi_processor_claim_cst_control())
			break;

		return true;
	}

	acpi_state_table.count = 0;
	pr_debug("ACPI _CST not found or not usable\n");
	return false;
}
/*
 * intel_idle_init_cstates_acpi - Build drv's idle states list from _CST.
 * @drv: cpuidle driver whose states[] array is appended to.
 *
 * Convert each entry of acpi_state_table (extracted earlier by
 * intel_idle_acpi_cst_extract()) into a cpuidle state, honoring the
 * max_cstate limit and the disabled_states_mask.
 */
static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
	int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);

	/*
	 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
	 * the interesting states are ACPI_CSTATE_FFH.
	 */
	for (cstate = 1; cstate < limit; cstate++) {
		struct acpi_processor_cx *cx;
		struct cpuidle_state *state;

		if (intel_idle_max_cstate_reached(cstate - 1))
			break;

		cx = &acpi_state_table.states[cstate];

		state = &drv->states[drv->state_count++];

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		/*
		 * For C1-type C-states use the same number for both the exit
		 * latency and target residency, because that is the case for
		 * C1 in the majority of the static C-states tables above.
		 * For the other types of C-states, however, set the target
		 * residency to 3 times the exit latency which should lead to
		 * a reasonable balance between energy-efficiency and
		 * performance in the majority of interesting cases.
		 */
		state->target_residency = cx->latency;
		if (cx->type > ACPI_STATE_C1)
			state->target_residency *= 3;

		/* The _CST FFH address field carries the MWAIT hint. */
		state->flags = MWAIT2flg(cx->address);
		if (cx->type > ACPI_STATE_C2)
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;

		if (disabled_states_mask & BIT(cstate))
			state->flags |= CPUIDLE_FLAG_OFF;

		if (intel_idle_state_needs_timer_stop(state))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			mark_tsc_unstable("TSC halts in idle");

		state->enter = intel_idle;
		state->enter_s2idle = intel_idle_s2idle;
	}
}
  1632. static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
  1633. {
  1634. int cstate, limit;
  1635. /*
  1636. * If there are no _CST C-states, do not disable any C-states by
  1637. * default.
  1638. */
  1639. if (!acpi_state_table.count)
  1640. return false;
  1641. limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
  1642. /*
  1643. * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
  1644. * the interesting states are ACPI_CSTATE_FFH.
  1645. */
  1646. for (cstate = 1; cstate < limit; cstate++) {
  1647. u32 acpi_hint = acpi_state_table.states[cstate].address;
  1648. u32 table_hint = mwait_hint;
  1649. if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) {
  1650. acpi_hint &= ~MWAIT_SUBSTATE_MASK;
  1651. table_hint &= ~MWAIT_SUBSTATE_MASK;
  1652. }
  1653. if (acpi_hint == table_hint)
  1654. return false;
  1655. }
  1656. return true;
  1657. }
#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */

/*
 * Stubs for kernels built without ACPI processor C-state support: ACPI use is
 * never forced, _CST extraction always fails, the ACPI state-table setup is a
 * no-op, and no static-table states are disabled by default.
 */
#define force_use_acpi	(false)

static inline bool intel_idle_acpi_cst_extract(void) { return false; }
static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
	return false;
}
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
  1667. /**
  1668. * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
  1669. *
  1670. * Tune IVT multi-socket targets.
  1671. * Assumption: num_sockets == (max_package_num + 1).
  1672. */
  1673. static void __init ivt_idle_state_table_update(void)
  1674. {
  1675. /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
  1676. int cpu, package_num, num_sockets = 1;
  1677. for_each_online_cpu(cpu) {
  1678. package_num = topology_physical_package_id(cpu);
  1679. if (package_num + 1 > num_sockets) {
  1680. num_sockets = package_num + 1;
  1681. if (num_sockets > 4) {
  1682. cpuidle_state_table = ivt_cstates_8s;
  1683. return;
  1684. }
  1685. }
  1686. }
  1687. if (num_sockets > 2)
  1688. cpuidle_state_table = ivt_cstates_4s;
  1689. /* else, 1 and 2 socket systems use default ivt_cstates */
  1690. }
  1691. /**
  1692. * irtl_2_usec - IRTL to microseconds conversion.
  1693. * @irtl: IRTL MSR value.
  1694. *
  1695. * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
  1696. */
  1697. static unsigned long long __init irtl_2_usec(unsigned long long irtl)
  1698. {
  1699. static const unsigned int irtl_ns_units[] __initconst = {
  1700. 1, 32, 1024, 32768, 1048576, 33554432, 0, 0
  1701. };
  1702. unsigned long long ns;
  1703. if (!irtl)
  1704. return 0;
  1705. ns = irtl_ns_units[(irtl >> 10) & 0x7];
  1706. return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
  1707. }
  1708. /**
  1709. * bxt_idle_state_table_update - Fix up the Broxton idle states table.
  1710. *
  1711. * On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the
  1712. * definitive maximum latency and use the same value for target_residency.
  1713. */
  1714. static void __init bxt_idle_state_table_update(void)
  1715. {
  1716. unsigned long long msr;
  1717. unsigned int usec;
  1718. rdmsrl(MSR_PKGC6_IRTL, msr);
  1719. usec = irtl_2_usec(msr);
  1720. if (usec) {
  1721. bxt_cstates[2].exit_latency = usec;
  1722. bxt_cstates[2].target_residency = usec;
  1723. }
  1724. rdmsrl(MSR_PKGC7_IRTL, msr);
  1725. usec = irtl_2_usec(msr);
  1726. if (usec) {
  1727. bxt_cstates[3].exit_latency = usec;
  1728. bxt_cstates[3].target_residency = usec;
  1729. }
  1730. rdmsrl(MSR_PKGC8_IRTL, msr);
  1731. usec = irtl_2_usec(msr);
  1732. if (usec) {
  1733. bxt_cstates[4].exit_latency = usec;
  1734. bxt_cstates[4].target_residency = usec;
  1735. }
  1736. rdmsrl(MSR_PKGC9_IRTL, msr);
  1737. usec = irtl_2_usec(msr);
  1738. if (usec) {
  1739. bxt_cstates[5].exit_latency = usec;
  1740. bxt_cstates[5].target_residency = usec;
  1741. }
  1742. rdmsrl(MSR_PKGC10_IRTL, msr);
  1743. usec = irtl_2_usec(msr);
  1744. if (usec) {
  1745. bxt_cstates[6].exit_latency = usec;
  1746. bxt_cstates[6].target_residency = usec;
  1747. }
  1748. }
  1749. /**
  1750. * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
  1751. *
  1752. * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
  1753. */
  1754. static void __init sklh_idle_state_table_update(void)
  1755. {
  1756. unsigned long long msr;
  1757. unsigned int eax, ebx, ecx, edx;
  1758. /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
  1759. if (max_cstate <= 7)
  1760. return;
  1761. /* if PC10 not present in CPUID.MWAIT.EDX */
  1762. if ((mwait_substates & (0xF << 28)) == 0)
  1763. return;
  1764. rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
  1765. /* PC10 is not enabled in PKG C-state limit */
  1766. if ((msr & 0xF) != 8)
  1767. return;
  1768. ecx = 0;
  1769. cpuid(7, &eax, &ebx, &ecx, &edx);
  1770. /* if SGX is present */
  1771. if (ebx & (1 << 2)) {
  1772. rdmsrl(MSR_IA32_FEAT_CTL, msr);
  1773. /* if SGX is enabled */
  1774. if (msr & (1 << 18))
  1775. return;
  1776. }
  1777. skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */
  1778. skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
  1779. }
  1780. /**
  1781. * skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake
  1782. * idle states table.
  1783. */
  1784. static void __init skx_idle_state_table_update(void)
  1785. {
  1786. unsigned long long msr;
  1787. rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
  1788. /*
  1789. * 000b: C0/C1 (no package C-state support)
  1790. * 001b: C2
  1791. * 010b: C6 (non-retention)
  1792. * 011b: C6 (retention)
  1793. * 111b: No Package C state limits.
  1794. */
  1795. if ((msr & 0x7) < 2) {
  1796. /*
  1797. * Uses the CC6 + PC0 latency and 3 times of
  1798. * latency for target_residency if the PC6
  1799. * is disabled in BIOS. This is consistent
  1800. * with how intel_idle driver uses _CST
  1801. * to set the target_residency.
  1802. */
  1803. skx_cstates[2].exit_latency = 92;
  1804. skx_cstates[2].target_residency = 276;
  1805. }
  1806. }
  1807. /**
  1808. * adl_idle_state_table_update - Adjust AlderLake idle states table.
  1809. */
  1810. static void __init adl_idle_state_table_update(void)
  1811. {
  1812. /* Check if user prefers C1 over C1E. */
  1813. if (preferred_states_mask & BIT(1) && !(preferred_states_mask & BIT(2))) {
  1814. cpuidle_state_table[0].flags &= ~CPUIDLE_FLAG_UNUSABLE;
  1815. cpuidle_state_table[1].flags |= CPUIDLE_FLAG_UNUSABLE;
  1816. /* Disable C1E by clearing the "C1E promotion" bit. */
  1817. c1e_promotion = C1E_PROMOTION_DISABLE;
  1818. return;
  1819. }
  1820. /* Make sure C1E is enabled by default */
  1821. c1e_promotion = C1E_PROMOTION_ENABLE;
  1822. }
  1823. /**
  1824. * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
  1825. */
  1826. static void __init spr_idle_state_table_update(void)
  1827. {
  1828. unsigned long long msr;
  1829. /*
  1830. * By default, the C6 state assumes the worst-case scenario of package
  1831. * C6. However, if PC6 is disabled, we update the numbers to match
  1832. * core C6.
  1833. */
  1834. rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
  1835. /* Limit value 2 and above allow for PC6. */
  1836. if ((msr & 0x7) < 2) {
  1837. spr_cstates[2].exit_latency = 190;
  1838. spr_cstates[2].target_residency = 600;
  1839. }
  1840. }
  1841. static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
  1842. {
  1843. unsigned int mwait_cstate = (MWAIT_HINT2CSTATE(mwait_hint) + 1) &
  1844. MWAIT_CSTATE_MASK;
  1845. unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
  1846. MWAIT_SUBSTATE_MASK;
  1847. /* Ignore the C-state if there are NO sub-states in CPUID for it. */
  1848. if (num_substates == 0)
  1849. return false;
  1850. if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
  1851. mark_tsc_unstable("TSC halts in idle states deeper than C2");
  1852. return true;
  1853. }
/*
 * state_update_enter_method - Pick the ->enter() callback for an idle state.
 * @state: idle state to update.
 * @cstate: index of the state (used only for logging).
 *
 * Select among intel_idle_xstate(), intel_idle_ibrs(), intel_idle_irq() and
 * the default intel_idle() based on the state flags and module parameters.
 */
static void state_update_enter_method(struct cpuidle_state *state, int cstate)
{
	if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) {
		/*
		 * Combining XSTATE with IBRS or IRQ_ENABLE flags
		 * is not currently supported by this driver.
		 */
		WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS);
		WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
		state->enter = intel_idle_xstate;
		return;
	}

	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
	    ((state->flags & CPUIDLE_FLAG_IBRS) || ibrs_off)) {
		/*
		 * IBRS mitigation requires that C-states are entered
		 * with interrupts disabled.
		 */
		if (ibrs_off && (state->flags & CPUIDLE_FLAG_IRQ_ENABLE))
			state->flags &= ~CPUIDLE_FLAG_IRQ_ENABLE;
		WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
		state->enter = intel_idle_ibrs;
		return;
	}

	if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) {
		state->enter = intel_idle_irq;
		return;
	}

	/* Debug override via the force_irq_on module parameter. */
	if (force_irq_on) {
		pr_info("forced intel_idle_irq for state %d\n", cstate);
		state->enter = intel_idle_irq;
	}
}
/*
 * intel_idle_init_cstates_icpu - Populate @drv from the static C-states table.
 * @drv: cpuidle driver to populate.
 *
 * Apply the model-specific table fixups first, then copy each usable entry of
 * cpuidle_state_table into @drv, adjusting flags and ->enter() callbacks.
 */
static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
	int cstate;

	/* Model-specific adjustments of the static tables. */
	switch (boot_cpu_data.x86_vfm) {
	case INTEL_IVYBRIDGE_X:
		ivt_idle_state_table_update();
		break;
	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_PLUS:
		bxt_idle_state_table_update();
		break;
	case INTEL_SKYLAKE:
		sklh_idle_state_table_update();
		break;
	case INTEL_SKYLAKE_X:
		skx_idle_state_table_update();
		break;
	case INTEL_SAPPHIRERAPIDS_X:
	case INTEL_EMERALDRAPIDS_X:
		spr_idle_state_table_update();
		break;
	case INTEL_ALDERLAKE:
	case INTEL_ALDERLAKE_L:
	case INTEL_ATOM_GRACEMONT:
		adl_idle_state_table_update();
		break;
	}

	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
		struct cpuidle_state *state;
		unsigned int mwait_hint;

		if (intel_idle_max_cstate_reached(cstate))
			break;

		/* Both callbacks NULL marks the end of the table. */
		if (!cpuidle_state_table[cstate].enter &&
		    !cpuidle_state_table[cstate].enter_s2idle)
			break;

		/* If marked as unusable, skip this state. */
		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
			pr_debug("state %s is disabled\n",
				 cpuidle_state_table[cstate].name);
			continue;
		}

		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
		if (!intel_idle_verify_cstate(mwait_hint))
			continue;

		/* Structure copy. */
		drv->states[drv->state_count] = cpuidle_state_table[cstate];
		state = &drv->states[drv->state_count];

		state_update_enter_method(state, cstate);

		/*
		 * Disable the state if the user asked for it or if ACPI _CST
		 * does not list it (unless it is flagged always-enable).
		 */
		if ((disabled_states_mask & BIT(drv->state_count)) ||
		    ((icpu->use_acpi || force_use_acpi) &&
		     intel_idle_off_by_default(state->flags, mwait_hint) &&
		     !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
			state->flags |= CPUIDLE_FLAG_OFF;

		if (intel_idle_state_needs_timer_stop(state))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		drv->state_count++;
	}

	if (icpu->byt_auto_demotion_disable_flag) {
		wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
		wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
	}
}
  1949. /**
  1950. * intel_idle_cpuidle_driver_init - Create the list of available idle states.
  1951. * @drv: cpuidle driver structure to initialize.
  1952. */
  1953. static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
  1954. {
  1955. cpuidle_poll_state_init(drv);
  1956. if (disabled_states_mask & BIT(0))
  1957. drv->states[0].flags |= CPUIDLE_FLAG_OFF;
  1958. drv->state_count = 1;
  1959. if (icpu && icpu->state_table)
  1960. intel_idle_init_cstates_icpu(drv);
  1961. else
  1962. intel_idle_init_cstates_acpi(drv);
  1963. }
  1964. static void auto_demotion_disable(void)
  1965. {
  1966. unsigned long long msr_bits;
  1967. rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
  1968. msr_bits &= ~auto_demotion_disable_flags;
  1969. wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
  1970. }
  1971. static void c1e_promotion_enable(void)
  1972. {
  1973. unsigned long long msr_bits;
  1974. rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
  1975. msr_bits |= 0x2;
  1976. wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
  1977. }
  1978. static void c1e_promotion_disable(void)
  1979. {
  1980. unsigned long long msr_bits;
  1981. rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
  1982. msr_bits &= ~0x2;
  1983. wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
  1984. }
  1985. /**
  1986. * intel_idle_cpu_init - Register the target CPU with the cpuidle core.
  1987. * @cpu: CPU to initialize.
  1988. *
  1989. * Register a cpuidle device object for @cpu and update its MSRs in accordance
  1990. * with the processor model flags.
  1991. */
  1992. static int intel_idle_cpu_init(unsigned int cpu)
  1993. {
  1994. struct cpuidle_device *dev;
  1995. dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
  1996. dev->cpu = cpu;
  1997. if (cpuidle_register_device(dev)) {
  1998. pr_debug("cpuidle_register_device %d failed!\n", cpu);
  1999. return -EIO;
  2000. }
  2001. if (auto_demotion_disable_flags)
  2002. auto_demotion_disable();
  2003. if (c1e_promotion == C1E_PROMOTION_ENABLE)
  2004. c1e_promotion_enable();
  2005. else if (c1e_promotion == C1E_PROMOTION_DISABLE)
  2006. c1e_promotion_disable();
  2007. return 0;
  2008. }
  2009. static int intel_idle_cpu_online(unsigned int cpu)
  2010. {
  2011. struct cpuidle_device *dev;
  2012. if (!boot_cpu_has(X86_FEATURE_ARAT))
  2013. tick_broadcast_enable();
  2014. /*
  2015. * Some systems can hotplug a cpu at runtime after
  2016. * the kernel has booted, we have to initialize the
  2017. * driver in this case
  2018. */
  2019. dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
  2020. if (!dev->registered)
  2021. return intel_idle_cpu_init(cpu);
  2022. return 0;
  2023. }
  2024. /**
  2025. * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
  2026. */
  2027. static void __init intel_idle_cpuidle_devices_uninit(void)
  2028. {
  2029. int i;
  2030. for_each_online_cpu(i)
  2031. cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
  2032. }
/*
 * intel_idle_init - Driver entry point.
 *
 * Validate the CPU and its MWAIT capabilities, pick either a static C-states
 * table or ACPI _CST data, then register the cpuidle driver, the per-CPU
 * devices and the CPU hotplug callback.  Uses goto-based cleanup on failure.
 */
static int __init intel_idle_init(void)
{
	const struct x86_cpu_id *id;
	unsigned int eax, ebx, ecx;
	int retval;

	/* Do not load intel_idle at all for now if idle= is passed */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (max_cstate == 0) {
		pr_debug("disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (id) {
		if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
			pr_debug("Please enable MWAIT in BIOS SETUP\n");
			return -ENODEV;
		}
	} else {
		id = x86_match_cpu(intel_mwait_ids);
		if (!id)
			return -ENODEV;
	}

	/* The MWAIT leaf must exist and advertise the needed extensions. */
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug("MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	if (icpu) {
		/* Prefer the static table; fall back to ACPI _CST. */
		if (icpu->state_table)
			cpuidle_state_table = icpu->state_table;
		else if (!intel_idle_acpi_cst_extract())
			return -ENODEV;

		auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
		if (icpu->disable_promotion_to_c1e)
			c1e_promotion = C1E_PROMOTION_DISABLE;
		if (icpu->use_acpi || force_use_acpi)
			intel_idle_acpi_cst_extract();
	} else if (!intel_idle_acpi_cst_extract()) {
		return -ENODEV;
	}

	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
		 boot_cpu_data.x86_model);

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (!intel_idle_cpuidle_devices)
		return -ENOMEM;

	intel_idle_cpuidle_driver_init(&intel_idle_driver);

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		/* Another cpuidle driver got registered first. */
		struct cpuidle_driver *drv = cpuidle_get_driver();
		printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
		       drv ? drv->name : "none");
		goto init_driver_fail;
	}

	retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
				   intel_idle_cpu_online, NULL);
	if (retval < 0)
		goto hp_setup_fail;

	pr_debug("Local APIC timer is reliable in %s\n",
		 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");

	return 0;

hp_setup_fail:
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
	free_percpu(intel_idle_cpuidle_devices);
	return retval;
}
device_initcall(intel_idle_init);

/*
 * We are not really modular, but we used to support that.  Meaning we also
 * support "intel_idle.max_cstate=..." at boot and also a read-only export of
 * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
 * is the easiest way (currently) to continue doing that.
 */
module_param(max_cstate, int, 0444);
/*
 * The positions of the bits that are set in this number are the indices of the
 * idle states to be disabled by default (as reflected by the names of the
 * corresponding idle state directories in sysfs, "state0", "state1" ...
 * "state<i>" ..., where <i> is the index of the given state).
 */
module_param_named(states_off, disabled_states_mask, uint, 0444);
MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
/*
 * Some platforms come with mutually exclusive C-states, so that if one is
 * enabled, the other C-states must not be used. Example: C1 and C1E on
 * Sapphire Rapids platform. This parameter allows for selecting the
 * preferred C-states among the groups of mutually exclusive C-states - the
 * selected C-states will be registered, the other C-states from the mutually
 * exclusive group won't be registered. If the platform has no mutually
 * exclusive C-states, this parameter has no effect.
 */
module_param_named(preferred_cstates, preferred_states_mask, uint, 0444);
MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
/*
 * Debugging option that forces the driver to enter all C-states with
 * interrupts enabled. Does not apply to C-states with
 * 'CPUIDLE_FLAG_INIT_XSTATE' and 'CPUIDLE_FLAG_IBRS' flags.
 */
module_param(force_irq_on, bool, 0444);
/*
 * Force the disabling of IBRS when X86_FEATURE_KERNEL_IBRS is on and
 * CPUIDLE_FLAG_IRQ_ENABLE isn't set.
 */
module_param(ibrs_off, bool, 0444);
MODULE_PARM_DESC(ibrs_off, "Disable IBRS when idle");