timer-riscv.c 6.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2012 Regents of the University of California
  4. * Copyright (C) 2017 SiFive
  5. *
  6. * All RISC-V systems have a timer attached to every hart. These timers can
  7. * either be read from the "time" and "timeh" CSRs, and can use the SBI to
  8. * setup events, or directly accessed using MMIO registers.
  9. */
  10. #define pr_fmt(fmt) "riscv-timer: " fmt
  11. #include <linux/acpi.h>
  12. #include <linux/clocksource.h>
  13. #include <linux/clockchips.h>
  14. #include <linux/cpu.h>
  15. #include <linux/delay.h>
  16. #include <linux/irq.h>
  17. #include <linux/irqdomain.h>
  18. #include <linux/module.h>
  19. #include <linux/sched_clock.h>
  20. #include <linux/io-64-nonatomic-lo-hi.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/of_irq.h>
  23. #include <linux/limits.h>
  24. #include <clocksource/timer-riscv.h>
  25. #include <asm/smp.h>
  26. #include <asm/cpufeature.h>
  27. #include <asm/sbi.h>
  28. #include <asm/timex.h>
/*
 * Enabled during common init when the Sstc ISA extension is detected;
 * selects direct stimecmp CSR programming over SBI set_timer calls.
 */
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);

/* Set from DT ("riscv,timer-cannot-wake-cpu") or the ACPI RHCT flag;
 * when true the clockevent gets CLOCK_EVT_FEAT_C3STOP. */
static bool riscv_timer_cannot_wake_cpu;
  31. static void riscv_clock_event_stop(void)
  32. {
  33. if (static_branch_likely(&riscv_sstc_available)) {
  34. csr_write(CSR_STIMECMP, ULONG_MAX);
  35. if (IS_ENABLED(CONFIG_32BIT))
  36. csr_write(CSR_STIMECMPH, ULONG_MAX);
  37. } else {
  38. sbi_set_timer(U64_MAX);
  39. }
  40. }
  41. static int riscv_clock_next_event(unsigned long delta,
  42. struct clock_event_device *ce)
  43. {
  44. u64 next_tval = get_cycles64() + delta;
  45. if (static_branch_likely(&riscv_sstc_available)) {
  46. #if defined(CONFIG_32BIT)
  47. csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
  48. csr_write(CSR_STIMECMPH, next_tval >> 32);
  49. #else
  50. csr_write(CSR_STIMECMP, next_tval);
  51. #endif
  52. } else
  53. sbi_set_timer(next_tval);
  54. return 0;
  55. }
/* clockevents shutdown hook: quiesce the timer on the current hart. */
static int riscv_clock_shutdown(struct clock_event_device *evt)
{
	riscv_clock_event_stop();
	return 0;
}
/* Linux IRQ number mapped for RV_IRQ_TIMER in the per-hart INTC domain. */
static unsigned int riscv_clock_event_irq;

/*
 * Per-CPU clockevent device.  Rating starts at 100 and is raised to 450
 * in riscv_timer_starting_cpu() when the Sstc extension is available.
 */
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
	.name			= "riscv_timer_clockevent",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 100,
	.set_next_event		= riscv_clock_next_event,
	.set_state_shutdown	= riscv_clock_shutdown,
};
/*
 * It is guaranteed that all the timers across all the harts are synchronized
 * within one tick of each other, so while this could technically go
 * backwards when hopping between CPUs, practically it won't happen.
 */
static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
{
	return get_cycles64();
}
/*
 * sched_clock backend: raw 64-bit cycle count.  notrace because this is
 * called from tracing/scheduler paths that must not recurse into ftrace.
 */
static u64 notrace riscv_sched_clock(void)
{
	return get_cycles64();
}
/* Full 64-bit, continuously running clocksource backed by the time CSR. */
static struct clocksource riscv_clocksource = {
	.name		= "riscv_clocksource",
	.rating		= 400,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= riscv_clocksource_rdtime,
#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)
	/* Usable from the vDSO fast path when generic gettimeofday is on. */
	.vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
#else
	.vdso_clock_mode = VDSO_CLOCKMODE_NONE,
#endif
};
  94. static int riscv_timer_starting_cpu(unsigned int cpu)
  95. {
  96. struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
  97. /* Clear timer interrupt */
  98. riscv_clock_event_stop();
  99. ce->cpumask = cpumask_of(cpu);
  100. ce->irq = riscv_clock_event_irq;
  101. if (riscv_timer_cannot_wake_cpu)
  102. ce->features |= CLOCK_EVT_FEAT_C3STOP;
  103. if (static_branch_likely(&riscv_sstc_available))
  104. ce->rating = 450;
  105. clockevents_config_and_register(ce, riscv_timebase, 100, ULONG_MAX);
  106. enable_percpu_irq(riscv_clock_event_irq,
  107. irq_get_trigger_type(riscv_clock_event_irq));
  108. return 0;
  109. }
static int riscv_timer_dying_cpu(unsigned int cpu)
{
	/*
	 * Stop the timer when the cpu is going to be offline otherwise
	 * the timer interrupt may be pending while performing power-down.
	 */
	riscv_clock_event_stop();
	disable_percpu_irq(riscv_clock_event_irq);
	return 0;
}
/*
 * Expose the registered clocksource's mult/shift conversion pair to
 * other kernel code (exported; valid once clocksource_register_hz() has
 * run in riscv_timer_init_common()).
 */
void riscv_cs_get_mult_shift(u32 *mult, u32 *shift)
{
	*mult = riscv_clocksource.mult;
	*shift = riscv_clocksource.shift;
}
EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift);
  126. /* called directly from the low-level interrupt handler */
  127. static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
  128. {
  129. struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
  130. riscv_clock_event_stop();
  131. evdev->event_handler(evdev);
  132. return IRQ_HANDLED;
  133. }
/*
 * One-time (boot CPU) setup shared by the DT and ACPI probe paths:
 * map the timer IRQ, register the clocksource/sched_clock, request the
 * percpu interrupt and install the CPU hotplug callbacks.
 *
 * NOTE: error paths do not unwind earlier registrations (clocksource,
 * irq mapping); this is a one-shot __init path.
 */
static int __init riscv_timer_init_common(void)
{
	int error;
	struct irq_domain *domain;
	struct fwnode_handle *intc_fwnode = riscv_get_intc_hwnode();

	/* The timer interrupt is delivered through the per-hart INTC domain. */
	domain = irq_find_matching_fwnode(intc_fwnode, DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("Failed to find irq_domain for INTC node [%pfwP]\n",
		       intc_fwnode);
		return -ENODEV;
	}

	riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
	if (!riscv_clock_event_irq) {
		pr_err("Failed to map timer interrupt for node [%pfwP]\n", intc_fwnode);
		return -ENODEV;
	}

	error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
	if (error) {
		pr_err("RISCV timer registration failed [%d]\n", error);
		return error;
	}

	sched_clock_register(riscv_sched_clock, 64, riscv_timebase);

	error = request_percpu_irq(riscv_clock_event_irq,
				   riscv_timer_interrupt,
				   "riscv-timer", &riscv_clock_event);
	if (error) {
		pr_err("registering percpu irq failed [%d]\n", error);
		return error;
	}

	/* Sstc lets us program stimecmp directly instead of calling into SBI. */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		pr_info("Timer interrupt in S-mode is available via sstc extension\n");
		static_branch_enable(&riscv_sstc_available);
	}

	/* Per-CPU setup/teardown runs on each hart as it comes online/offline. */
	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
				  "clockevents/riscv/timer:starting",
				  riscv_timer_starting_cpu, riscv_timer_dying_cpu);
	if (error)
		pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
		       error);
	return error;
}
  175. static int __init riscv_timer_init_dt(struct device_node *n)
  176. {
  177. int cpuid, error;
  178. unsigned long hartid;
  179. struct device_node *child;
  180. error = riscv_of_processor_hartid(n, &hartid);
  181. if (error < 0) {
  182. pr_warn("Invalid hartid for node [%pOF] error = [%lu]\n",
  183. n, hartid);
  184. return error;
  185. }
  186. cpuid = riscv_hartid_to_cpuid(hartid);
  187. if (cpuid < 0) {
  188. pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
  189. return cpuid;
  190. }
  191. if (cpuid != smp_processor_id())
  192. return 0;
  193. child = of_find_compatible_node(NULL, NULL, "riscv,timer");
  194. if (child) {
  195. riscv_timer_cannot_wake_cpu = of_property_read_bool(child,
  196. "riscv,timer-cannot-wake-cpu");
  197. of_node_put(child);
  198. }
  199. return riscv_timer_init_common();
  200. }
/* Hook the DT probe path for "riscv" CPU nodes. */
TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);
#ifdef CONFIG_ACPI
/*
 * ACPI probe entry: the RHCT table's flags field carries the
 * CANNOT_WAKEUP_CPU bit, which maps to riscv_timer_cannot_wake_cpu.
 */
static int __init riscv_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_rhct *rhct = (struct acpi_table_rhct *)table;

	riscv_timer_cannot_wake_cpu = rhct->flags & ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU;

	return riscv_timer_init_common();
}

TIMER_ACPI_DECLARE(aclint_mtimer, ACPI_SIG_RHCT, riscv_timer_acpi_init);
#endif