therm_throt.c

/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 * Inspired by Ross Biro's and Al Borchers' counter code.
 */
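/*
 * Note: the counters maintained here are exposed per CPU via sysfs,
 * typically under /sys/devices/system/cpu/cpuN/thermal_throttle/
 * (e.g. core_throttle_count, package_throttle_count, and the
 * *_power_limit_count files when power-limit notification is enabled).
 */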
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL          (300 * HZ)

#define THERMAL_THROTTLING_EVENT        0
#define POWER_LIMIT_EVENT               1
/*
 * Current thermal event state:
 */
struct _thermal_state {
        bool                    new_event;
        int                     event;
        u64                     next_check;
        unsigned long           count;
        unsigned long           last_count;
};

struct thermal_state {
        struct _thermal_state core_throttle;
        struct _thermal_state core_power_limit;
        struct _thermal_state package_throttle;
        struct _thermal_state package_power_limit;
        struct _thermal_state core_thresh0;
        struct _thermal_state core_thresh1;
        struct _thermal_state pkg_thresh0;
        struct _thermal_state pkg_thresh1;
};
/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);

/* Callback to handle package threshold interrupts */
int (*platform_thermal_package_notify)(__u64 msr_val);
EXPORT_SYMBOL_GPL(platform_thermal_package_notify);

/*
 * Callback support for rate control: returns true if the
 * callback implements its own rate control.
 */
bool (*platform_thermal_package_rate_control)(void);
EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);

static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en = ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;
#ifdef CONFIG_SYSFS
#define define_therm_throt_device_one_ro(_name)                         \
        static DEVICE_ATTR(_name, 0444,                                 \
                           therm_throt_device_show_##_name,             \
                           NULL)                                        \

#define define_therm_throt_device_show_func(event, name)                \
                                                                        \
static ssize_t therm_throt_device_show_##event##_##name(                \
                        struct device *dev,                             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
{                                                                       \
        unsigned int cpu = dev->id;                                     \
        ssize_t ret;                                                    \
                                                                        \
        preempt_disable();      /* CPU hotplug */                       \
        if (cpu_online(cpu)) {                                          \
                ret = sprintf(buf, "%lu\n",                             \
                              per_cpu(thermal_state, cpu).event.name);  \
        } else                                                          \
                ret = 0;                                                \
        preempt_enable();                                               \
                                                                        \
        return ret;                                                     \
}

define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);

static struct attribute *thermal_throttle_attrs[] = {
        &dev_attr_core_throttle_count.attr,
        NULL
};

static const struct attribute_group thermal_attr_group = {
        .attrs  = thermal_throttle_attrs,
        .name   = "thermal_throttle"
};
#endif /* CONFIG_SYSFS */
#define CORE_LEVEL      0
#define PACKAGE_LEVEL   1
/***
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is currently asserted (boolean), since
 *             the thermal interrupt normally fires both when the thermal
 *             event begins and once the event has ended.
 * @event:     THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT.
 * @level:     CORE_LEVEL or PACKAGE_LEVEL.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 */
static void therm_throt_process(bool new_event, int event, int level)
{
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
        bool old_event;
        u64 now;
        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

        now = get_jiffies_64();
        if (level == CORE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->core_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->core_power_limit;
                else
                        return;
        } else if (level == PACKAGE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->package_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->package_power_limit;
                else
                        return;
        } else
                return;

        old_event = state->new_event;
        state->new_event = new_event;

        if (new_event)
                state->count++;

        if (time_before64(now, state->next_check) &&
                        state->count != state->last_count)
                return;

        state->next_check = now + CHECK_INTERVAL;
        state->last_count = state->count;

        /* if we just entered the thermal event */
        if (new_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package",
                                state->count);
                return;
        }
        if (old_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package");
                return;
        }
}
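/*
 * Rate-limit threshold notifications: a given threshold event on a given
 * CPU is forwarded at most once per CHECK_INTERVAL.
 */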
static int thresh_event_valid(int level, int event)
{
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
        u64 now = get_jiffies_64();

        if (level == PACKAGE_LEVEL)
                state = (event == 0) ? &pstate->pkg_thresh0 :
                                       &pstate->pkg_thresh1;
        else
                state = (event == 0) ? &pstate->core_thresh0 :
                                       &pstate->core_thresh1;

        if (time_before64(now, state->next_check))
                return 0;

        state->next_check = now + CHECK_INTERVAL;

        return 1;
}
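/*
 * Power-limit notification (PLN) interrupts are off by default; the
 * "int_pln_enable" kernel parameter below opts in to handling them.
 */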
static bool int_pln_enable;
static int __init int_pln_enable_setup(char *s)
{
        int_pln_enable = true;

        return 1;
}
__setup("int_pln_enable", int_pln_enable_setup);
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
{
        int err;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
        if (err)
                return err;

        if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                err = sysfs_add_file_to_group(&dev->kobj,
                                              &dev_attr_core_power_limit_count.attr,
                                              thermal_attr_group.name);
        if (cpu_has(c, X86_FEATURE_PTS)) {
                err = sysfs_add_file_to_group(&dev->kobj,
                                              &dev_attr_package_throttle_count.attr,
                                              thermal_attr_group.name);
                if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                        err = sysfs_add_file_to_group(&dev->kobj,
                                        &dev_attr_package_power_limit_count.attr,
                                        thermal_attr_group.name);
        }

        return err;
}

static void thermal_throttle_remove_dev(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int thermal_throttle_online(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        return thermal_throttle_add_dev(dev, cpu);
}

static int thermal_throttle_offline(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        thermal_throttle_remove_dev(dev);
        return 0;
}

static __init int thermal_throttle_init_device(void)
{
        int ret;

        if (!atomic_read(&therm_throt_en))
                return 0;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
                                thermal_throttle_online,
                                thermal_throttle_offline);
        return ret < 0 ? ret : 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */
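/*
 * Forward package threshold events to the platform driver; if the driver
 * implements its own rate control, bypass thresh_event_valid().
 */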
static void notify_package_thresholds(__u64 msr_val)
{
        bool notify_thres_0 = false;
        bool notify_thres_1 = false;

        if (!platform_thermal_package_notify)
                return;

        /* lower threshold check */
        if (msr_val & THERM_LOG_THRESHOLD0)
                notify_thres_0 = true;
        /* higher threshold check */
        if (msr_val & THERM_LOG_THRESHOLD1)
                notify_thres_1 = true;

        if (!notify_thres_0 && !notify_thres_1)
                return;

        if (platform_thermal_package_rate_control &&
            platform_thermal_package_rate_control()) {
                /* Rate control is implemented in callback */
                platform_thermal_package_notify(msr_val);
                return;
        }

        /* lower threshold reached */
        if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
                platform_thermal_package_notify(msr_val);
        /* higher threshold reached */
        if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
                platform_thermal_package_notify(msr_val);
}
static void notify_thresholds(__u64 msr_val)
{
        /*
         * Check whether the interrupt handler is defined;
         * otherwise simply return.
         */
        if (!platform_thermal_notify)
                return;

        /* lower threshold reached */
        if ((msr_val & THERM_LOG_THRESHOLD0) &&
            thresh_event_valid(CORE_LEVEL, 0))
                platform_thermal_notify(msr_val);
        /* higher threshold reached */
        if ((msr_val & THERM_LOG_THRESHOLD1) &&
            thresh_event_valid(CORE_LEVEL, 1))
                platform_thermal_notify(msr_val);
}
/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
        __u64 msr_val;

        if (static_cpu_has(X86_FEATURE_HWP))
                wrmsrl_safe(MSR_HWP_STATUS, 0);

        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

        /* Check for violation of core thermal thresholds */
        notify_thresholds(msr_val);

        therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
                            THERMAL_THROTTLING_EVENT,
                            CORE_LEVEL);

        if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
                                    POWER_LIMIT_EVENT,
                                    CORE_LEVEL);

        if (this_cpu_has(X86_FEATURE_PTS)) {
                rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
                /* check violations of package thermal thresholds */
                notify_package_thresholds(msr_val);
                therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
                                    THERMAL_THROTTLING_EVENT,
                                    PACKAGE_LEVEL);
                if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                        therm_throt_process(msr_val &
                                        PACKAGE_THERM_STATUS_POWER_LIMIT,
                                        POWER_LIMIT_EVENT,
                                        PACKAGE_LEVEL);
        }
}
static void unexpected_thermal_interrupt(void)
{
        pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
               smp_processor_id());
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
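/*
 * Entry point for the thermal vector: dispatches through smp_thermal_vector,
 * which intel_init_thermal() repoints to intel_thermal_interrupt().
 */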
asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
{
        entering_irq();
        trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
        inc_irq_stat(irq_thermal_count);
        smp_thermal_vector();
        trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
        exiting_ack_irq();
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
        if (!boot_cpu_has(X86_FEATURE_APIC))
                return 0;
        if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
                return 0;
        return 1;
}
void __init mcheck_intel_therm_init(void)
{
        /*
         * This function is only called on the boot CPU. Save the initial
         * thermal LVT value on the BSP and use that value to restore the
         * APs' thermal LVT entry that the BIOS programmed earlier.
         */
        if (intel_thermal_supported(&boot_cpu_data))
                lvtthmr_init = apic_read(APIC_LVTTHMR);
}
void intel_init_thermal(struct cpuinfo_x86 *c)
{
        unsigned int cpu = smp_processor_id();
        int tm2 = 0;
        u32 l, h;

        if (!intel_thermal_supported(c))
                return;

        /*
         * First check if it's enabled already, in which case there might
         * be some SMM goo which handles it, so we can't even put a handler
         * since it might be delivered via SMI already:
         */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);

        h = lvtthmr_init;
        /*
         * The initial value of thermal LVT entries on all APs always reads
         * 0x10000 because APs are woken up by the BSP issuing an
         * INIT-SIPI-SIPI sequence to them, and LVT registers are reset to 0s
         * except for the mask bits, which are set to 1s when APs receive the
         * INIT IPI. If BIOS takes over the thermal interrupt and sets its
         * interrupt delivery mode to SMI (not fixed), this restores on the AP
         * the value that the BIOS has programmed, based on the BSP's info we
         * saved, since BIOS always sets the same value for all threads/cores.
         */
        if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
                apic_write(APIC_LVTTHMR, lvtthmr_init);

        if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                if (system_state == SYSTEM_BOOTING)
                        pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
                return;
        }

        /* early Pentium M models use a different method for enabling TM2 */
        if (cpu_has(c, X86_FEATURE_TM2)) {
                if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
                        rdmsr(MSR_THERM2_CTL, l, h);
                        if (l & MSR_THERM2_CTL_TM_SELECT)
                                tm2 = 1;
                } else if (l & MSR_IA32_MISC_ENABLE_TM2)
                        tm2 = 1;
        }

        /* We'll mask the thermal vector in the lapic till we're ready: */
        h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
        apic_write(APIC_LVTTHMR, h);

        rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
        if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                      (l | (THERM_INT_LOW_ENABLE
                        | THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
        else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                      l | (THERM_INT_LOW_ENABLE
                        | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
        else
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

        if (cpu_has(c, X86_FEATURE_PTS)) {
                rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
                if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                              (l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE))
                              & ~PACKAGE_THERM_INT_PLN_ENABLE, h);
                else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                              l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE
                                | PACKAGE_THERM_INT_PLN_ENABLE), h);
                else
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                              l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE), h);
        }

        smp_thermal_vector = intel_thermal_interrupt;

        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

        /* Unmask the thermal vector: */
        l = apic_read(APIC_LVTTHMR);
        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

        pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
                     tm2 ? "TM2" : "TM1");

        /* enable thermal throttle processing */
        atomic_set(&therm_throt_en, 1);
}