csrc-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 by Ralf Baechle
 */
#include <linux/clocksource.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/sched_clock.h>

#include <asm/time.h>
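
/* Clocksource read callback: report the current value of the CP0 Count register. */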
static u64 c0_hpt_read(struct clocksource *cs)
{
        return read_c0_count();
}

static struct clocksource clocksource_mips = {
        .name = "MIPS",
        .read = c0_hpt_read,
        .mask = CLOCKSOURCE_MASK(32),
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
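
/*
 * sched_clock() backend: the same CP0 Count read. Marked notrace because
 * sched_clock() supplies the timestamps used by the tracer itself.
 */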
static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
        return read_c0_count();
}
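
/*
 * Read the counter via RDHWR $2, the user-mode view of CP0 Count that
 * the VDSO relies on (available from MIPS r2 onwards).
 */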
static inline unsigned int rdhwr_count(void)
{
        unsigned int count;

        __asm__ __volatile__(
        " .set push\n"
        " .set mips32r2\n"
        " rdhwr %0, $2\n"
        " .set pop\n"
        : "=r" (count));

        return count;
}

static bool rdhwr_count_usable(void)
{
        unsigned int prev, curr, i;

        /*
         * Older QEMUs have a broken implementation of RDHWR for the CP0 count
         * which always returns a constant value. Try to identify this and don't
         * use it in the VDSO if it is broken. This workaround can be removed
         * once the fix has been in QEMU stable for a reasonable amount of time.
         */
        for (i = 0, prev = rdhwr_count(); i < 100; i++) {
                curr = rdhwr_count();

                if (curr != prev)
                        return true;

                prev = curr;
        }

        pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
        return false;
}
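
/*
 * The count can only back sched_clock() if its rate never changes (no
 * cpufreq) and, on SMP, if the kernel tolerates an unstable sched_clock,
 * since the per-CPU counters may not be synchronised.
 */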
static inline __init bool count_can_be_sched_clock(void)
{
        if (IS_ENABLED(CONFIG_CPU_FREQ))
                return false;

        if (num_possible_cpus() > 1 &&
            !IS_ENABLED(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK))
                return false;

        return true;
}
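
/*
 * With cpufreq, a frequency transition changes the rate of the CP0 Count,
 * so the clocksource is marked unstable on the first transition.
 */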
#ifdef CONFIG_CPU_FREQ

static bool __read_mostly r4k_clock_unstable;

static void r4k_clocksource_unstable(char *reason)
{
        if (r4k_clock_unstable)
                return;

        r4k_clock_unstable = true;

        pr_info("R4K timer is unstable due to %s\n", reason);
        clocksource_mark_unstable(&clocksource_mips);
}

static int r4k_cpufreq_callback(struct notifier_block *nb,
                                unsigned long val, void *data)
{
        if (val == CPUFREQ_POSTCHANGE)
                r4k_clocksource_unstable("CPU frequency change");

        return 0;
}

static struct notifier_block r4k_cpufreq_notifier = {
        .notifier_call = r4k_cpufreq_callback,
};

static int __init r4k_register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&r4k_cpufreq_notifier,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(r4k_register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */
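
/* Register the CP0 Count as a clocksource and, when suitable, as sched_clock. */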
int __init init_r4k_clocksource(void)
{
        if (!cpu_has_counter || !mips_hpt_frequency)
                return -ENXIO;

        /* Calculate a somewhat reasonable rating value */
        clocksource_mips.rating = 200;
        clocksource_mips.rating += clamp(mips_hpt_frequency / 10000000, 0, 99);

        /*
         * R2 onwards makes the count accessible to user mode so it can be used
         * by the VDSO (HWREna is configured by configure_hwrena()).
         */
        if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
                clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;

        clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);

        if (count_can_be_sched_clock())
                sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);

        return 0;
}