sh_cmt.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * SuperH Timer Support - CMT
  4. *
  5. * Copyright (C) 2008 Magnus Damm
  6. */
  7. #include <linux/clk.h>
  8. #include <linux/clockchips.h>
  9. #include <linux/clocksource.h>
  10. #include <linux/delay.h>
  11. #include <linux/err.h>
  12. #include <linux/init.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/io.h>
  15. #include <linux/iopoll.h>
  16. #include <linux/ioport.h>
  17. #include <linux/irq.h>
  18. #include <linux/module.h>
  19. #include <linux/of.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/pm_domain.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/sh_timer.h>
  24. #include <linux/slab.h>
  25. #include <linux/spinlock.h>
  26. #ifdef CONFIG_SUPERH
  27. #include <asm/platform_early.h>
  28. #endif
  29. struct sh_cmt_device;
  30. /*
  31. * The CMT comes in 5 different identified flavours, depending not only on the
  32. * SoC but also on the particular instance. The following table lists the main
  33. * characteristics of those flavours.
  34. *
  35. * 16B 32B 32B-F 48B R-Car Gen2
  36. * -----------------------------------------------------------------------------
  37. * Channels 2 1/4 1 6 2/8
  38. * Control Width 16 16 16 16 32
  39. * Counter Width 16 32 32 32/48 32/48
  40. * Shared Start/Stop Y Y Y Y N
  41. *
  42. * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
  43. * located in the channel registers block. All other versions have a shared
  44. * start/stop register located in the global space.
  45. *
  46. * Channels are indexed from 0 to N-1 in the documentation. The channel index
  47. * infers the start/stop bit position in the control register and the channel
  48. * registers block address. Some CMT instances have a subset of channels
  49. * available, in which case the index in the documentation doesn't match the
  50. * "real" index as implemented in hardware. This is for instance the case with
  51. * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
  52. * in the documentation but using start/stop bit 5 and having its registers
  53. * block at 0x60.
  54. *
  55. * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
  56. * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
  57. */
/* Identifiers for the five known CMT hardware flavours (see table above). */
enum sh_cmt_model {
	SH_CMT_16BIT,
	SH_CMT_32BIT,
	SH_CMT_48BIT,
	SH_CMT0_RCAR_GEN2,
	SH_CMT1_RCAR_GEN2,
};
/*
 * Static description of one CMT flavour: register width, status bits and
 * width-appropriate register accessor callbacks.
 */
struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned int channels_mask;	/* bitmask of channels present in hardware */
	unsigned long width;		/* 16 or 32 bit version of hardware block */
	u32 overflow_bit;		/* CMF flag bit in CMCSR */
	u32 clear_bits;			/* mask ANDed into CMCSR to ack an interrupt */

	/* callbacks for CMSTR and CMCSR access */
	u32 (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      u32 value);

	/* callbacks for CMCNT and CMCOR access */
	u32 (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
/*
 * Per-channel state: register block mappings, match-value bookkeeping and
 * the clockevent/clocksource instances embedded in the channel.
 */
struct sh_cmt_channel {
	struct sh_cmt_device *cmt;

	unsigned int index;	/* Index in the documentation */
	unsigned int hwidx;	/* Real hardware index */

	void __iomem *iostart;	/* per-channel start/stop block (R-Car Gen2 only) */
	void __iomem *ioctrl;	/* channel control register block */

	unsigned int timer_bit;	/* start/stop bit position in CMSTR */
	unsigned long flags;	/* private FLAG_* state bits */
	u32 match_value;	/* match value currently programmed into CMCOR */
	u32 next_match_value;	/* match value to program next */
	u32 max_match_value;	/* largest value the counter can hold */
	raw_spinlock_t lock;	/* protects flags and match bookkeeping */
	struct clock_event_device ced;
	struct clocksource cs;
	u64 total_cycles;	/* cycles accumulated across counter wraps */
	bool cs_enabled;	/* clocksource currently enabled */
};
/* One CMT hardware instance with its clock, mapping and channel array. */
struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;	/* flavour description */

	void __iomem *mapbase;		/* mapped register window */
	struct clk *clk;		/* functional clock */
	unsigned long rate;		/* counting rate after prescaling */
	unsigned int reg_delay;		/* us to wait after register writes */

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_cmt_channel *channels;
	unsigned int num_channels;	/* channels actually used */
	unsigned int hw_channels;	/* channels present in hardware */

	bool has_clockevent;
	bool has_clocksource;
};
/* CMCSR bits for the 16-bit variant */
#define SH_CMT16_CMCSR_CMF		(1 << 7)	/* compare match flag */
#define SH_CMT16_CMCSR_CMIE		(1 << 6)	/* compare match interrupt enable */
#define SH_CMT16_CMCSR_CKS8		(0 << 0)	/* clock select /8 */
#define SH_CMT16_CMCSR_CKS32		(1 << 0)	/* clock select /32 */
#define SH_CMT16_CMCSR_CKS128		(2 << 0)	/* clock select /128 */
#define SH_CMT16_CMCSR_CKS512		(3 << 0)	/* clock select /512 */
#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0)

/* CMCSR bits for the 32-bit variants */
#define SH_CMT32_CMCSR_CMF		(1 << 15)	/* compare match flag */
#define SH_CMT32_CMCSR_OVF		(1 << 14)	/* overflow flag */
#define SH_CMT32_CMCSR_WRFLG		(1 << 13)	/* CMCNT write in progress */
#define SH_CMT32_CMCSR_STTF		(1 << 12)
#define SH_CMT32_CMCSR_STPF		(1 << 11)
#define SH_CMT32_CMCSR_SSIE		(1 << 10)
#define SH_CMT32_CMCSR_CMS		(1 << 9)
#define SH_CMT32_CMCSR_CMM		(1 << 8)	/* free-running vs. periodic mode */
#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4)	/* compare match request: none */
#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4)	/* compare match request: DMA */
#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4)	/* compare match request: IRQ */
#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4)
#define SH_CMT32_CMCSR_DBGIVD		(1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0)	/* clock select RCLK/8 */
#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0)	/* clock select RCLK/32 */
#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0)	/* clock select RCLK/128 */
#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0)	/* clock select RCLK/1 */
#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)
  136. static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
  137. {
  138. return ioread16(base + (offs << 1));
  139. }
  140. static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
  141. {
  142. return ioread32(base + (offs << 2));
  143. }
  144. static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
  145. {
  146. iowrite16(value, base + (offs << 1));
  147. }
  148. static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
  149. {
  150. iowrite32(value, base + (offs << 2));
  151. }
/*
 * Per-flavour descriptors. The 16-bit variant uses 16-bit accessors
 * throughout; the 32-bit variant keeps 16-bit control registers with 32-bit
 * counters; the 48-bit and Gen2 variants are fully 32-bit.
 */
static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.channels_mask = 0x3f,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT0_RCAR_GEN2] = {
		.model = SH_CMT0_RCAR_GEN2,
		.channels_mask = 0x60,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT1_RCAR_GEN2] = {
		.model = SH_CMT1_RCAR_GEN2,
		.channels_mask = 0xff,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};
/* Channel register offsets, in units of the flavour's register stride */
#define CMCSR 0 /* channel register: control/status */
#define CMCNT 1 /* channel register: counter */
#define CMCOR 2 /* channel register: compare match constant */

#define CMCLKE	0x1000	/* CLK Enable Register (R-Car Gen2) */
  211. static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
  212. {
  213. if (ch->iostart)
  214. return ch->cmt->info->read_control(ch->iostart, 0);
  215. else
  216. return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
  217. }
  218. static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
  219. {
  220. u32 old_value = sh_cmt_read_cmstr(ch);
  221. if (value != old_value) {
  222. if (ch->iostart) {
  223. ch->cmt->info->write_control(ch->iostart, 0, value);
  224. udelay(ch->cmt->reg_delay);
  225. } else {
  226. ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
  227. udelay(ch->cmt->reg_delay);
  228. }
  229. }
  230. }
  231. static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
  232. {
  233. return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
  234. }
  235. static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
  236. {
  237. u32 old_value = sh_cmt_read_cmcsr(ch);
  238. if (value != old_value) {
  239. ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
  240. udelay(ch->cmt->reg_delay);
  241. }
  242. }
  243. static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
  244. {
  245. return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
  246. }
/*
 * Write the counter register, first waiting for any previous CMCNT write to
 * retire (WRFLG clear) on the 32-bit variants.
 *
 * Returns 0 on success or a negative error code if the previous write was
 * still pending when the poll timed out.
 */
static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
	/* Tests showed that we need to wait 3 clocks here */
	unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2);
	u32 reg;

	if (ch->cmt->info->model > SH_CMT_16BIT) {
		/* 32-bit variants flag an in-flight CMCNT write in CMCSR */
		int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg,
						   !(reg & SH_CMT32_CMCSR_WRFLG),
						   1, cmcnt_delay, false, ch);
		if (ret < 0)
			return ret;
	}

	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
	udelay(cmcnt_delay);
	return 0;
}
  263. static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
  264. {
  265. u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR);
  266. if (value != old_value) {
  267. ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
  268. udelay(ch->cmt->reg_delay);
  269. }
  270. }
/*
 * Read a consistent snapshot of the counter together with the overflow flag.
 *
 * Three counter reads are compared to reject values captured mid-update, and
 * the overflow bit is sampled before and after so a wrap during the reads
 * restarts the loop. *has_wrapped is set to the (non-zero) overflow bit when
 * the counter has wrapped. Returns the middle counter sample.
 */
static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
	u32 v1, v2, v3;
	u32 o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}
  288. static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
  289. {
  290. unsigned long flags;
  291. u32 value;
  292. /* start stop register shared by multiple timer channels */
  293. raw_spin_lock_irqsave(&ch->cmt->lock, flags);
  294. value = sh_cmt_read_cmstr(ch);
  295. if (start)
  296. value |= 1 << ch->timer_bit;
  297. else
  298. value &= ~(1 << ch->timer_bit);
  299. sh_cmt_write_cmstr(ch, value);
  300. raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
  301. }
/*
 * Bring a channel up: enable its clock, configure periodic compare-match
 * mode with the maximum timeout, clear the counter and set the start bit.
 *
 * Returns 0 on success or a negative error code (clock failure, or the
 * counter refusing to clear).
 */
static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
	int ret;

	/* keep the device powered across system suspend (syscore timer) */
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		/* CMTOUT_IE only exists on pre-Gen2 32-bit flavours */
		u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
			      SH_CMT32_CMCSR_CMTOUT_IE : 0;
		sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	ret = sh_cmt_write_cmcnt(ch, 0);

	/* verify the counter really cleared before starting */
	if (ret || sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;

 err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

 err0:
	return ret;
}
/*
 * Shut a channel down: clear the start bit, mask its interrupts, gate the
 * clock and drop the syscore-device marking taken in sh_cmt_enable().
 */
static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
}
/* private flags (sh_cmt_channel.flags) */
#define FLAG_CLOCKEVENT (1 << 0)	/* channel in use as a clockevent */
#define FLAG_CLOCKSOURCE (1 << 1)	/* channel in use as a clocksource */
#define FLAG_REPROGRAM (1 << 2)		/* hardware must be reprogrammed */
#define FLAG_SKIPEVENT (1 << 3)		/* next match interrupt carries no event */
#define FLAG_IRQCONTEXT (1 << 4)	/* executing inside sh_cmt_interrupt() */
/*
 * Program ch->next_match_value into CMCOR and verify, by re-reading the
 * counter, that the match was not set behind or too close to the current
 * count. When the counter has overtaken the match, retry with an
 * exponentially growing safety delay. @absolute selects whether the value is
 * relative to the current counter or to zero. Caller holds ch->lock.
 */
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	u32 value = ch->next_match_value;
	u32 new_match;
	u32 delay = 0;
	u32 now = 0;
	u32 has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		/* delay wrapping to zero ends the loop below; warn once */
		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}
/*
 * Record @delta as the next match offset and program the hardware. An
 * out-of-range delta is only warned about, not clamped here -- the program
 * path limits the value actually written to max_match_value. Caller must
 * hold ch->lock.
 */
static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}
  441. static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
  442. {
  443. unsigned long flags;
  444. raw_spin_lock_irqsave(&ch->lock, flags);
  445. __sh_cmt_set_next(ch, delta);
  446. raw_spin_unlock_irqrestore(&ch->lock, flags);
  447. }
/*
 * Compare-match interrupt handler: acknowledge the status flags, account
 * elapsed cycles for the clocksource, deliver the clockevent, and reprogram
 * the timer under ch->lock when FLAG_REPROGRAM is set. FLAG_IRQCONTEXT lets
 * set_next_event() called from the event handler defer hardware programming
 * to this function's reprogram step.
 */
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;
	unsigned long flags;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (clockevent_state_oneshot(&ch->ced)) {
				/* oneshot fired: park until a new event is set */
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((clockevent_state_shutdown(&ch->ced))
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return IRQ_HANDLED;
}
/*
 * Take a use of the channel for @flag (FLAG_CLOCKEVENT or FLAG_CLOCKSOURCE),
 * enabling the hardware on the first user. Runtime PM references are taken
 * per use; the clocksource reference is taken outside ch->lock because
 * pm_runtime_get_sync() may sleep.
 *
 * Returns 0 on success or a negative error code from sh_cmt_enable().
 */
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	if (flag & FLAG_CLOCKSOURCE)
		pm_runtime_get_sync(&ch->cmt->pdev->dev);

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
		if (flag & FLAG_CLOCKEVENT)
			pm_runtime_get_sync(&ch->cmt->pdev->dev);
		ret = sh_cmt_enable(ch);
	}

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if (ch->cmt->num_channels == 1 &&
	    flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}
/*
 * Drop a use of the channel for @flag, disabling the hardware when the last
 * user goes away. Mirrors the runtime PM references taken in sh_cmt_start();
 * the clocksource reference is released outside ch->lock.
 */
static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
		sh_cmt_disable(ch);
		if (flag & FLAG_CLOCKEVENT)
			pm_runtime_put(&ch->cmt->pdev->dev);
	}

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);

	if (flag & FLAG_CLOCKSOURCE)
		pm_runtime_put(&ch->cmt->pdev->dev);
}
/* Map an embedded clocksource back to its containing channel. */
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}
/*
 * Clocksource read callback. On single-channel devices the counter is also
 * used as a periodic timer, so the cycles accumulated across wraps
 * (total_cycles) must be added under ch->lock, compensating for a wrap
 * caught mid-read. Multi-channel devices return the free-running counter
 * directly.
 */
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	u32 has_wrapped;

	if (ch->cmt->num_channels == 1) {
		unsigned long flags;
		u64 value;
		u32 raw;

		raw_spin_lock_irqsave(&ch->lock, flags);
		value = ch->total_cycles;
		raw = sh_cmt_get_counter(ch, &has_wrapped);

		if (unlikely(has_wrapped))
			raw += ch->match_value + 1;
		raw_spin_unlock_irqrestore(&ch->lock, flags);

		return value + raw;
	}

	return sh_cmt_get_counter(ch, &has_wrapped);
}
  551. static int sh_cmt_clocksource_enable(struct clocksource *cs)
  552. {
  553. int ret;
  554. struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
  555. WARN_ON(ch->cs_enabled);
  556. ch->total_cycles = 0;
  557. ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
  558. if (!ret)
  559. ch->cs_enabled = true;
  560. return ret;
  561. }
/* Clocksource disable callback: release the channel's clocksource use. */
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}
/*
 * Clocksource suspend callback: stop the channel and suspend its power
 * domain, but only if the clocksource is actually enabled.
 */
static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}
/*
 * Clocksource resume callback: power the domain back up before restarting
 * the channel (reverse order of suspend).
 */
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
  585. static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
  586. const char *name)
  587. {
  588. struct clocksource *cs = &ch->cs;
  589. cs->name = name;
  590. cs->rating = 125;
  591. cs->read = sh_cmt_clocksource_read;
  592. cs->enable = sh_cmt_clocksource_enable;
  593. cs->disable = sh_cmt_clocksource_disable;
  594. cs->suspend = sh_cmt_clocksource_suspend;
  595. cs->resume = sh_cmt_clocksource_resume;
  596. cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
  597. cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
  598. dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
  599. ch->index);
  600. clocksource_register_hz(cs, ch->cmt->rate);
  601. return 0;
  602. }
/* Map an embedded clock_event_device back to its containing channel. */
static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}
  607. static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
  608. {
  609. sh_cmt_start(ch, FLAG_CLOCKEVENT);
  610. if (periodic)
  611. sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
  612. else
  613. sh_cmt_set_next(ch, ch->max_match_value);
  614. }
/* Clockevent set_state_shutdown callback: drop the clockevent use. */
static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	sh_cmt_stop(ch, FLAG_CLOCKEVENT);
	return 0;
}
/*
 * Common handler for entering periodic or oneshot mode: tear down any
 * previous periodic/oneshot use first, then restart the channel in the
 * requested mode.
 */
static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_cmt_clock_event_start(ch, periodic);
	return 0;
}
/* Clockevent set_state_oneshot callback. */
static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 0);
}
/* Clockevent set_state_periodic callback. */
static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 1);
}
/*
 * Clockevent set_next_event callback. When invoked from our own interrupt
 * handler (FLAG_IRQCONTEXT set) only record the new match value and let the
 * handler's reprogram step write the hardware; otherwise program it now.
 */
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
	unsigned long flags;

	BUG_ON(!clockevent_state_oneshot(ced));

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		__sh_cmt_set_next(ch, delta - 1);

	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return 0;
}
/* Clockevent suspend: quiesce the power domain, then unprepare the clock. */
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}
/* Clockevent resume: reverse of suspend -- clock first, then power domain. */
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
}
/*
 * Request the channel's interrupt and register it as a clockevent device.
 * Returns 0 on success or a negative error code.
 */
static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				      const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int irq;
	int ret;

	irq = platform_get_irq(ch->cmt->pdev, ch->index);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 125;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
	ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
	ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	/* TODO: calculate good shift from rate and counter bit width */
	ced->shift = 32;
	ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->max_delta_ticks = ch->max_match_value;
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
	ced->min_delta_ticks = 0x1f;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	return 0;
}
  707. static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
  708. bool clockevent, bool clocksource)
  709. {
  710. int ret;
  711. if (clockevent) {
  712. ch->cmt->has_clockevent = true;
  713. ret = sh_cmt_register_clockevent(ch, name);
  714. if (ret < 0)
  715. return ret;
  716. }
  717. if (clocksource) {
  718. ch->cmt->has_clocksource = true;
  719. sh_cmt_register_clocksource(ch, name);
  720. }
  721. return 0;
  722. }
/*
 * Initialize one channel: compute its register block addresses from the
 * hardware index, enable the per-channel clock supply on Gen2 parts, and
 * register it for the requested uses. Returns 0 on success (including for
 * unused channels) or a negative error code.
 */
static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
				unsigned int hwidx, bool clockevent,
				bool clocksource, struct sh_cmt_device *cmt)
{
	u32 value;
	int ret;

	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->cmt = cmt;
	ch->index = index;
	ch->hwidx = hwidx;
	ch->timer_bit = hwidx;

	/*
	 * Compute the address of the channel control register block. For the
	 * timers with a per-channel start/stop register, compute its address
	 * as well.
	 */
	switch (cmt->info->model) {
	case SH_CMT_16BIT:
		ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
		break;
	case SH_CMT_32BIT:
	case SH_CMT_48BIT:
		ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
		break;
	case SH_CMT0_RCAR_GEN2:
	case SH_CMT1_RCAR_GEN2:
		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
		ch->ioctrl = ch->iostart + 0x10;
		/* per-channel CMSTR: start/stop is always bit 0 */
		ch->timer_bit = 0;

		/* Enable the clock supply to the channel */
		value = ioread32(cmt->mapbase + CMCLKE);
		value |= BIT(hwidx);
		iowrite32(value, cmt->mapbase + CMCLKE);
		break;
	}

	/* avoid undefined behaviour from shifting by the full type width */
	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->info->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      clockevent, clocksource);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	return 0;
}
  776. static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
  777. {
  778. struct resource *mem;
  779. mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
  780. if (!mem) {
  781. dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
  782. return -ENXIO;
  783. }
  784. cmt->mapbase = ioremap(mem->start, resource_size(mem));
  785. if (cmt->mapbase == NULL) {
  786. dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
  787. return -ENXIO;
  788. }
  789. return 0;
  790. }
/*
 * Legacy (non-DT) platform device name to timer-variant mapping; the
 * driver_data points at the matching entry of sh_cmt_info[].
 */
static const struct platform_device_id sh_cmt_id_table[] = {
	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
/*
 * DT compatible string to timer-variant mapping. Note that the R-Car
 * Gen3 and Gen4 compatibles reuse the Gen2 variant descriptions.
 */
static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
	{
		/* deprecated, preserved for backward compatibility */
		.compatible = "renesas,cmt-48",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		/* deprecated, preserved for backward compatibility */
		.compatible = "renesas,cmt-48-gen2",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,r8a7740-cmt1",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		.compatible = "renesas,sh73a0-cmt1",
		.data = &sh_cmt_info[SH_CMT_48BIT]
	},
	{
		.compatible = "renesas,rcar-gen2-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen2-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen3-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen3-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen4-cmt0",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{
		.compatible = "renesas,rcar-gen4-cmt1",
		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
/*
 * One-time device setup: determine the timer variant and channel mask
 * (from OF match data or legacy platform data), acquire and prepare the
 * functional clock, map the registers, and initialize every populated
 * channel.
 *
 * On success the clock is left prepared but disabled. On failure all
 * acquired resources are released in reverse acquisition order through
 * the goto ladder at the end.
 *
 * Returns 0 on success or a negative error code.
 */
static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
	unsigned int mask, i;
	unsigned long rate;
	int ret;

	cmt->pdev = pdev;
	raw_spin_lock_init(&cmt->lock);

	/* Variant description: OF match data, or the legacy id_entry path. */
	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		cmt->info = of_device_get_match_data(&pdev->dev);
		cmt->hw_channels = cmt->info->channels_mask;
	} else if (pdev->dev.platform_data) {
		struct sh_timer_config *cfg = pdev->dev.platform_data;
		const struct platform_device_id *id = pdev->id_entry;

		cmt->info = (const struct sh_cmt_info *)id->driver_data;
		cmt->hw_channels = cfg->channels_mask;
	} else {
		dev_err(&cmt->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	cmt->clk = clk_get(&cmt->pdev->dev, "fck");
	if (IS_ERR(cmt->clk)) {
		dev_err(&cmt->pdev->dev, "cannot get clock\n");
		return PTR_ERR(cmt->clk);
	}

	ret = clk_prepare(cmt->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate. */
	ret = clk_enable(cmt->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	rate = clk_get_rate(cmt->clk);
	if (!rate) {
		ret = -EINVAL;
		goto err_clk_disable;
	}

	/* We shall wait 2 input clks after register writes */
	if (cmt->info->model >= SH_CMT_48BIT)
		cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate);

	/* Counting rate: fck divided by 512 for 16-bit timers, by 8 otherwise. */
	cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8);

	/* Map the memory resource(s). */
	ret = sh_cmt_map_memory(cmt);
	if (ret < 0)
		goto err_clk_disable;

	/* Allocate and setup the channels. */
	cmt->num_channels = hweight8(cmt->hw_channels);
	cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
				GFP_KERNEL);
	if (cmt->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source. If only one channel is available use it for both.
	 */
	for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
		/* Lowest set bit in the mask is the next HW channel number. */
		unsigned int hwidx = ffs(mask) - 1;
		bool clocksource = i == 1 || cmt->num_channels == 1;
		bool clockevent = i == 0;

		ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
					   clockevent, clocksource, cmt);
		if (ret < 0)
			goto err_unmap;

		mask &= ~(1 << hwidx);
	}

	/* Rate determined; leave the clock prepared but disabled. */
	clk_disable(cmt->clk);

	platform_set_drvdata(pdev, cmt);

	return 0;

err_unmap:
	kfree(cmt->channels);
	iounmap(cmt->mapbase);
err_clk_disable:
	clk_disable(cmt->clk);
err_clk_unprepare:
	clk_unprepare(cmt->clk);
err_clk_put:
	clk_put(cmt->clk);
	return ret;
}
/*
 * Probe the platform device. On SuperH the device may already have been
 * initialized through the early platform ("earlytimer") path; in that
 * case drvdata is already set and only runtime PM is configured here.
 *
 * Returns 0 on success or a negative error code.
 */
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* Non-NULL drvdata means the earlytimer path already did the setup. */
	if (cmt) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL)
		return -ENOMEM;

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	/* Early platform devices skip the runtime-PM handling below. */
	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	if (cmt->has_clockevent || cmt->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
/*
 * Platform driver description. No .remove: unbind is suppressed because
 * an active system timer cannot safely be torn down.
 */
static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.driver		= {
		.name	= "sh_cmt",
		.of_match_table = of_match_ptr(sh_cmt_of_table),
		.suppress_bind_attrs = true,
	},
	.id_table	= sh_cmt_id_table,
};
/* Module init: register the platform driver (hooked via subsys_initcall). */
static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}
#ifdef CONFIG_SUPERH
/* On SuperH the CMT can also be probed via the early platform path. */
sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
#endif

/* Registered at subsys initcall level rather than module_init. */
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");