/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/arch_timer.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ASM_ARCH_TIMER_H
#define __ASM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>
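
/*
 * On CPUs with an unstable architected counter, an out-of-line workaround
 * is installed through the per-CPU timer_unstable_counter_workaround
 * pointer. erratum_handler() picks the workaround accessor when one is
 * registered for the current CPU and falls back to the default
 * arch_timer_* helper otherwise.
 */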
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
#define has_erratum_handler(h)						\
({									\
	const struct arch_timer_erratum_workaround *__wa;		\
	__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
	(__wa && __wa->h);						\
})

#define erratum_handler(h)						\
({									\
	const struct arch_timer_erratum_workaround *__wa;		\
	__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
	(__wa && __wa->h) ? ({ isb(); __wa->h;}) : arch_timer_##h;	\
})

#else
#define has_erratum_handler(h)	false
#define erratum_handler(h)	(arch_timer_##h)
#endif
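
/*
 * A workaround descriptor is matched against a CPU by device tree
 * property, by local CPU capability ID, or by ACPI OEM information, and
 * provides replacement counter reads and timer-programming hooks for the
 * affected parts.
 */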
enum arch_timer_erratum_match_type {
	ate_match_dt,
	ate_match_local_cap_id,
	ate_match_acpi_oem_info,
};

struct clock_event_device;

struct arch_timer_erratum_workaround {
	enum arch_timer_erratum_match_type match_type;
	const void *id;
	const char *desc;
	u64 (*read_cntpct_el0)(void);
	u64 (*read_cntvct_el0)(void);
	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
	bool disable_compat_vdso;
};

DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
		timer_unstable_counter_workaround);
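
/*
 * Default counter accessors. Without FEAT_ECV, an ISB is required so the
 * counter read cannot be speculated ahead of earlier instructions; with
 * ECV, the self-synchronising CNTPCTSS_EL0/CNTVCTSS_EL0 views are used
 * instead and the ALTERNATIVE patches the barrier out.
 */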
static inline notrace u64 arch_timer_read_cntpct_el0(void)
{
	u64 cnt;

	asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
				 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
				 ARM64_HAS_ECV)
		     : "=r" (cnt));

	return cnt;
}

static inline notrace u64 arch_timer_read_cntvct_el0(void)
{
	u64 cnt;

	asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
				 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
				 ARM64_HAS_ECV)
		     : "=r" (cnt));

	return cnt;
}

#define arch_timer_reg_read_stable(reg)					\
({									\
	erratum_handler(read_ ## reg)();				\
})

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code.
 */
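/*
 * Since these helpers are always inlined with compile-time constant
 * arguments, any invalid access/reg combination leaves a reachable
 * BUILD_BUG() and fails the build instead of being caught at run time.
 */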
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntp_ctl_el0);
			isb();
			break;
		case ARCH_TIMER_REG_CVAL:
			write_sysreg(val, cntp_cval_el0);
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntv_ctl_el0);
			isb();
			break;
		case ARCH_TIMER_REG_CVAL:
			write_sysreg(val, cntv_cval_el0);
			break;
		default:
			BUILD_BUG();
		}
	} else {
		BUILD_BUG();
	}
}

static __always_inline
u64 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntp_ctl_el0);
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntv_ctl_el0);
		default:
			BUILD_BUG();
		}
	}

	BUILD_BUG();
	unreachable();
}
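
/*
 * CNTFRQ_EL0 holds the counter frequency programmed by firmware.
 * CNTKCTL_EL1 controls EL0 access to the counters/timers and the event
 * stream; the ISB after writing it makes the new setting visible before
 * any subsequent counter or timer access.
 */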
static inline u32 arch_timer_get_cntfrq(void)
{
	return read_sysreg(cntfrq_el0);
}

static inline u32 arch_timer_get_cntkctl(void)
{
	return read_sysreg(cntkctl_el1);
}

static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	write_sysreg(cntkctl, cntkctl_el1);
	isb();
}
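
/*
 * The *_stable variants go through the erratum machinery and may use a
 * workaround accessor; the plain variants read the counter directly.
 * arch_counter_enforce_ordering() adds a dependency on the returned value
 * so that subsequent memory accesses are ordered after the counter read.
 */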
static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
	u64 cnt;

	cnt = arch_timer_reg_read_stable(cntpct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

static __always_inline u64 __arch_counter_get_cntpct(void)
{
	u64 cnt;

	asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
				 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
				 ARM64_HAS_ECV)
		     : "=r" (cnt));
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
	u64 cnt;

	cnt = arch_timer_reg_read_stable(cntvct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

static __always_inline u64 __arch_counter_get_cntvct(void)
{
	u64 cnt;

	asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
				 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
				 ARM64_HAS_ECV)
		     : "=r" (cnt));
	arch_counter_enforce_ordering(cnt);
	return cnt;
}

static inline int arch_timer_arch_init(void)
{
	return 0;
}
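
/*
 * The timer event stream is advertised to userspace through the EVTSTRM
 * hwcap (and its compat counterpart for 32-bit tasks), telling WFE-based
 * delay loops that periodic wake-up events are available.
 */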
static inline void arch_timer_set_evtstrm_feature(void)
{
	cpu_set_named_feature(EVTSTRM);
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

static inline bool arch_timer_have_evtstrm_feature(void)
{
	return cpu_have_named_feature(EVTSTRM);
}

#endif