/* arc_timer.c - ARC Timer0/Timer1, RTC and GFRC clocksource/clockevent driver */
/*
 * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1, Each can be
 * programmed to go from @count to @limit and optionally interrupt.
 * We've designated TIMER0 for clockevents and TIMER1 for clocksource
 *
 * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP)
 * which are suitable for UP and SMP based clocksources respectively
 */
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

#include <soc/arc/timers.h>
#include <soc/arc/mcip.h>
  27. static unsigned long arc_timer_freq;
  28. static int noinline arc_get_timer_clk(struct device_node *node)
  29. {
  30. struct clk *clk;
  31. int ret;
  32. clk = of_clk_get(node, 0);
  33. if (IS_ERR(clk)) {
  34. pr_err("timer missing clk\n");
  35. return PTR_ERR(clk);
  36. }
  37. ret = clk_prepare_enable(clk);
  38. if (ret) {
  39. pr_err("Couldn't enable parent clk\n");
  40. return ret;
  41. }
  42. arc_timer_freq = clk_get_rate(clk);
  43. return 0;
  44. }
  45. /********** Clock Source Device *********/
  46. #ifdef CONFIG_ARC_TIMERS_64BIT
  47. static u64 arc_read_gfrc(struct clocksource *cs)
  48. {
  49. unsigned long flags;
  50. u32 l, h;
  51. /*
  52. * From a programming model pov, there seems to be just one instance of
  53. * MCIP_CMD/MCIP_READBACK however micro-architecturally there's
  54. * an instance PER ARC CORE (not per cluster), and there are dedicated
  55. * hardware decode logic (per core) inside ARConnect to handle
  56. * simultaneous read/write accesses from cores via those two registers.
  57. * So several concurrent commands to ARConnect are OK if they are
  58. * trying to access two different sub-components (like GFRC,
  59. * inter-core interrupt, etc...). HW also supports simultaneously
  60. * accessing GFRC by multiple cores.
  61. * That's why it is safe to disable hard interrupts on the local CPU
  62. * before access to GFRC instead of taking global MCIP spinlock
  63. * defined in arch/arc/kernel/mcip.c
  64. */
  65. local_irq_save(flags);
  66. __mcip_cmd(CMD_GFRC_READ_LO, 0);
  67. l = read_aux_reg(ARC_REG_MCIP_READBACK);
  68. __mcip_cmd(CMD_GFRC_READ_HI, 0);
  69. h = read_aux_reg(ARC_REG_MCIP_READBACK);
  70. local_irq_restore(flags);
  71. return (((u64)h) << 32) | l;
  72. }
  73. static notrace u64 arc_gfrc_clock_read(void)
  74. {
  75. return arc_read_gfrc(NULL);
  76. }
  77. static struct clocksource arc_counter_gfrc = {
  78. .name = "ARConnect GFRC",
  79. .rating = 400,
  80. .read = arc_read_gfrc,
  81. .mask = CLOCKSOURCE_MASK(64),
  82. .flags = CLOCK_SOURCE_IS_CONTINUOUS,
  83. };
  84. static int __init arc_cs_setup_gfrc(struct device_node *node)
  85. {
  86. struct mcip_bcr mp;
  87. int ret;
  88. READ_BCR(ARC_REG_MCIP_BCR, mp);
  89. if (!mp.gfrc) {
  90. pr_warn("Global-64-bit-Ctr clocksource not detected\n");
  91. return -ENXIO;
  92. }
  93. ret = arc_get_timer_clk(node);
  94. if (ret)
  95. return ret;
  96. sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
  97. return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
  98. }
  99. TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
  100. #define AUX_RTC_CTRL 0x103
  101. #define AUX_RTC_LOW 0x104
  102. #define AUX_RTC_HIGH 0x105
  103. static u64 arc_read_rtc(struct clocksource *cs)
  104. {
  105. unsigned long status;
  106. u32 l, h;
  107. /*
  108. * hardware has an internal state machine which tracks readout of
  109. * low/high and updates the CTRL.status if
  110. * - interrupt/exception taken between the two reads
  111. * - high increments after low has been read
  112. */
  113. do {
  114. l = read_aux_reg(AUX_RTC_LOW);
  115. h = read_aux_reg(AUX_RTC_HIGH);
  116. status = read_aux_reg(AUX_RTC_CTRL);
  117. } while (!(status & _BITUL(31)));
  118. return (((u64)h) << 32) | l;
  119. }
  120. static notrace u64 arc_rtc_clock_read(void)
  121. {
  122. return arc_read_rtc(NULL);
  123. }
  124. static struct clocksource arc_counter_rtc = {
  125. .name = "ARCv2 RTC",
  126. .rating = 350,
  127. .read = arc_read_rtc,
  128. .mask = CLOCKSOURCE_MASK(64),
  129. .flags = CLOCK_SOURCE_IS_CONTINUOUS,
  130. };
  131. static int __init arc_cs_setup_rtc(struct device_node *node)
  132. {
  133. struct bcr_timer timer;
  134. int ret;
  135. READ_BCR(ARC_REG_TIMERS_BCR, timer);
  136. if (!timer.rtc) {
  137. pr_warn("Local-64-bit-Ctr clocksource not detected\n");
  138. return -ENXIO;
  139. }
  140. /* Local to CPU hence not usable in SMP */
  141. if (IS_ENABLED(CONFIG_SMP)) {
  142. pr_warn("Local-64-bit-Ctr not usable in SMP\n");
  143. return -EINVAL;
  144. }
  145. ret = arc_get_timer_clk(node);
  146. if (ret)
  147. return ret;
  148. write_aux_reg(AUX_RTC_CTRL, 1);
  149. sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
  150. return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
  151. }
  152. TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
  153. #endif
  154. /*
  155. * 32bit TIMER1 to keep counting monotonically and wraparound
  156. */
  157. static u64 arc_read_timer1(struct clocksource *cs)
  158. {
  159. return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
  160. }
  161. static notrace u64 arc_timer1_clock_read(void)
  162. {
  163. return arc_read_timer1(NULL);
  164. }
  165. static struct clocksource arc_counter_timer1 = {
  166. .name = "ARC Timer1",
  167. .rating = 300,
  168. .read = arc_read_timer1,
  169. .mask = CLOCKSOURCE_MASK(32),
  170. .flags = CLOCK_SOURCE_IS_CONTINUOUS,
  171. };
  172. static int __init arc_cs_setup_timer1(struct device_node *node)
  173. {
  174. int ret;
  175. /* Local to CPU hence not usable in SMP */
  176. if (IS_ENABLED(CONFIG_SMP))
  177. return -EINVAL;
  178. ret = arc_get_timer_clk(node);
  179. if (ret)
  180. return ret;
  181. write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
  182. write_aux_reg(ARC_REG_TIMER1_CNT, 0);
  183. write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
  184. sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
  185. return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
  186. }
  187. /********** Clock Event Device *********/
  188. static int arc_timer_irq;
  189. /*
  190. * Arm the timer to interrupt after @cycles
  191. * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
  192. */
  193. static void arc_timer_event_setup(unsigned int cycles)
  194. {
  195. write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
  196. write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
  197. write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
  198. }
  199. static int arc_clkevent_set_next_event(unsigned long delta,
  200. struct clock_event_device *dev)
  201. {
  202. arc_timer_event_setup(delta);
  203. return 0;
  204. }
  205. static int arc_clkevent_set_periodic(struct clock_event_device *dev)
  206. {
  207. /*
  208. * At X Hz, 1 sec = 1000ms -> X cycles;
  209. * 10ms -> X / 100 cycles
  210. */
  211. arc_timer_event_setup(arc_timer_freq / HZ);
  212. return 0;
  213. }
  214. static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
  215. .name = "ARC Timer0",
  216. .features = CLOCK_EVT_FEAT_ONESHOT |
  217. CLOCK_EVT_FEAT_PERIODIC,
  218. .rating = 300,
  219. .set_next_event = arc_clkevent_set_next_event,
  220. .set_state_periodic = arc_clkevent_set_periodic,
  221. };
  222. static irqreturn_t timer_irq_handler(int irq, void *dev_id)
  223. {
  224. /*
  225. * Note that generic IRQ core could have passed @evt for @dev_id if
  226. * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
  227. */
  228. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  229. int irq_reenable = clockevent_state_periodic(evt);
  230. /*
  231. * 1. ACK the interrupt
  232. * - For ARC700, any write to CTRL reg ACKs it, so just rewrite
  233. * Count when [N]ot [H]alted bit.
  234. * - For HS3x, it is a bit subtle. On taken count-down interrupt,
  235. * IP bit [3] is set, which needs to be cleared for ACK'ing.
  236. * The write below can only update the other two bits, hence
  237. * explicitly clears IP bit
  238. * 2. Re-arm interrupt if periodic by writing to IE bit [0]
  239. */
  240. write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
  241. evt->event_handler(evt);
  242. return IRQ_HANDLED;
  243. }
  244. static int arc_timer_starting_cpu(unsigned int cpu)
  245. {
  246. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  247. evt->cpumask = cpumask_of(smp_processor_id());
  248. clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
  249. enable_percpu_irq(arc_timer_irq, 0);
  250. return 0;
  251. }
  252. static int arc_timer_dying_cpu(unsigned int cpu)
  253. {
  254. disable_percpu_irq(arc_timer_irq);
  255. return 0;
  256. }
  257. /*
  258. * clockevent setup for boot CPU
  259. */
  260. static int __init arc_clockevent_setup(struct device_node *node)
  261. {
  262. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  263. int ret;
  264. arc_timer_irq = irq_of_parse_and_map(node, 0);
  265. if (arc_timer_irq <= 0) {
  266. pr_err("clockevent: missing irq\n");
  267. return -EINVAL;
  268. }
  269. ret = arc_get_timer_clk(node);
  270. if (ret) {
  271. pr_err("clockevent: missing clk\n");
  272. return ret;
  273. }
  274. /* Needs apriori irq_set_percpu_devid() done in intc map function */
  275. ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
  276. "Timer0 (per-cpu-tick)", evt);
  277. if (ret) {
  278. pr_err("clockevent: unable to request irq\n");
  279. return ret;
  280. }
  281. ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
  282. "clockevents/arc/timer:starting",
  283. arc_timer_starting_cpu,
  284. arc_timer_dying_cpu);
  285. if (ret) {
  286. pr_err("Failed to setup hotplug state\n");
  287. return ret;
  288. }
  289. return 0;
  290. }
  291. static int __init arc_of_timer_init(struct device_node *np)
  292. {
  293. static int init_count = 0;
  294. int ret;
  295. if (!init_count) {
  296. init_count = 1;
  297. ret = arc_clockevent_setup(np);
  298. } else {
  299. ret = arc_cs_setup_timer1(np);
  300. }
  301. return ret;
  302. }
  303. TIMER_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);