idle.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Idle daemon for PowerPC.  The idle daemon handles any action
 * that needs to be taken when the system becomes idle.
 *
 * Originally written by Cort Dougan (cort@cs.nmt.edu).
 * Subsequent 32-bit hacking by Tom Rini, Armin Kuster,
 * Paul Mackerras and others.
 *
 * iSeries support added by Mike Corrigan <mikejc@us.ibm.com>
 *
 * Additional shared processor, SMT, and firmware support
 * Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/tick.h>

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/smp.h>
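
/*
 * cpuidle override state.  Set to IDLE_POWERSAVE_OFF by the
 * "powersave=off" command-line option (see powersave_off() below) so that
 * no power-saving idle state is used.
 */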
unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(cpuidle_disable);

static int __init powersave_off(char *arg)
{
	ppc_md.power_save = NULL;
	cpuidle_disable = IDLE_POWERSAVE_OFF;
	return 1;
}
__setup("powersave=off", powersave_off);
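
/*
 * Default architecture idle, called from the generic idle loop.  Drop the
 * run latch while idle, then either use the platform's power_save() hook
 * or simply lower the SMT thread priority for this pass through the loop.
 */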
void arch_cpu_idle(void)
{
	ppc64_runlatch_off();

	if (ppc_md.power_save) {
		ppc_md.power_save();
		/*
		 * Some power_save functions return with
		 * interrupts enabled, some don't.
		 */
		if (!irqs_disabled())
			raw_local_irq_disable();
	} else {
		/*
		 * Go into low thread priority and possibly
		 * low power mode.
		 */
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	ppc64_runlatch_on();
}
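
/*
 * Whether the "nap" low-power state may be used; exposed as the
 * kernel.powersave-nap sysctl registered below.
 */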
int powersave_nap;

#ifdef CONFIG_PPC_970_NAP
void power4_idle(void)
{
	if (!cpu_has_feature(CPU_FTR_CAN_NAP))
		return;

	if (!powersave_nap)
		return;

	if (!prep_irq_for_idle())
		return;
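
	/*
	 * Stop all outstanding VMX/Altivec data-stream prefetches
	 * (PPC_DSSALL) and order that with a sync before napping.
	 */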
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile(PPC_DSSALL " ; sync" ::: "memory");

	power4_idle_nap();

	/*
	 * power4_idle_nap() returns to our caller with interrupts enabled
	 * (soft and hard).  Our caller can cope with either interrupts
	 * disabled or enabled upon return.
	 */
}
#endif

#ifdef CONFIG_SYSCTL
/*
 * Register the sysctl to set/clear powersave_nap.
 */
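/*
 * The knob shows up as /proc/sys/kernel/powersave-nap; a non-zero value
 * allows the nap state to be used.
 */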
static struct ctl_table powersave_nap_ctl_table[] = {
	{
		.procname	= "powersave-nap",
		.data		= &powersave_nap,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static int __init
register_powersave_nap_sysctl(void)
{
	register_sysctl("kernel", powersave_nap_ctl_table);
	return 0;
}
__initcall(register_powersave_nap_sysctl);
#endif