ptp_dfl_tod.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * DFL device driver for Time-of-Day (ToD) private feature
 *
 * Copyright (C) 2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dfl.h>
#include <linux/gcd.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/spinlock.h>
#include <linux/units.h>

#define FME_FEATURE_ID_TOD		0x22

/* ToD clock register space. */
#define TOD_CLK_FREQ			0x038

/*
 * The read sequence of the ToD timestamp registers is TOD_NANOSEC, TOD_SECONDSL
 * and then TOD_SECONDSH, because a hardware snapshot of the whole timestamp is
 * taken whenever the TOD_NANOSEC register is read.
 *
 * The ToD IP requires writing the registers in the reverse order. The timestamp
 * is corrected when the TOD_NANOSEC register is written, so the write sequence
 * is TOD_SECONDSH, TOD_SECONDSL and then TOD_NANOSEC.
 */
#define TOD_SECONDSH			0x100
#define TOD_SECONDSL			0x104
#define TOD_NANOSEC			0x108
#define TOD_PERIOD			0x110
#define TOD_ADJUST_PERIOD		0x114
#define TOD_ADJUST_COUNT		0x118
#define TOD_DRIFT_ADJUST		0x11c
#define TOD_DRIFT_ADJUST_RATE		0x120

#define PERIOD_FRAC_OFFSET		16
#define SECONDS_MSB			GENMASK_ULL(47, 32)
#define SECONDS_LSB			GENMASK_ULL(31, 0)
#define TOD_SECONDSH_SEC_MSB		GENMASK_ULL(15, 0)
#define CAL_SECONDS(m, l)		((FIELD_GET(TOD_SECONDSH_SEC_MSB, (m)) << 32) | (l))

#define TOD_PERIOD_MASK			GENMASK_ULL(19, 0)
#define TOD_PERIOD_MAX			FIELD_MAX(TOD_PERIOD_MASK)
#define TOD_PERIOD_MIN			0
#define TOD_DRIFT_ADJUST_MASK		GENMASK_ULL(15, 0)
#define TOD_DRIFT_ADJUST_FNS_MAX	FIELD_MAX(TOD_DRIFT_ADJUST_MASK)
#define TOD_DRIFT_ADJUST_RATE_MAX	TOD_DRIFT_ADJUST_FNS_MAX
#define TOD_ADJUST_COUNT_MASK		GENMASK_ULL(19, 0)
#define TOD_ADJUST_COUNT_MAX		FIELD_MAX(TOD_ADJUST_COUNT_MASK)
#define TOD_ADJUST_INTERVAL_US		10
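
/*
 * Worst-case time for a fine offset adjustment to complete: the largest
 * possible integer period (nanoseconds per ToD clock cycle) applied for the
 * largest possible adjustment count, converted to milliseconds and then to
 * microseconds for use as the readl_poll_timeout_atomic() bound in
 * fine_adjust_tod_clock().
 */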
#define TOD_ADJUST_MS			\
	(((TOD_PERIOD_MAX >> 16) + 1) * (TOD_ADJUST_COUNT_MAX + 1))
#define TOD_ADJUST_MS_MAX		(TOD_ADJUST_MS / MICRO)
#define TOD_ADJUST_MAX_US		(TOD_ADJUST_MS_MAX * USEC_PER_MSEC)

#define TOD_MAX_ADJ			(500 * MEGA)

struct dfl_tod {
	struct ptp_clock_info ptp_clock_ops;
	struct device *dev;
	struct ptp_clock *ptp_clock;

	/* ToD Clock address space */
	void __iomem *tod_ctrl;

	/* ToD clock registers protection */
	spinlock_t tod_lock;
};

/*
 * A fine ToD HW clock offset adjustment. To perform the fine offset adjustment,
 * the adjust_period and adjust_count arguments are used to update the
 * TOD_ADJUST_PERIOD and TOD_ADJUST_COUNT registers in hardware. The dt->tod_lock
 * spinlock must be held when calling this function.
 */
static int fine_adjust_tod_clock(struct dfl_tod *dt, u32 adjust_period,
				 u32 adjust_count)
{
	void __iomem *base = dt->tod_ctrl;
	u32 val;
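
	/*
	 * The hardware applies adjust_period for adjust_count ToD clock cycles,
	 * and TOD_ADJUST_COUNT reads back zero once the adjustment is complete,
	 * which is what the poll below waits for.
	 */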
	writel(adjust_period, base + TOD_ADJUST_PERIOD);
	writel(adjust_count, base + TOD_ADJUST_COUNT);

	/* Wait for present offset adjustment update to complete */
	return readl_poll_timeout_atomic(base + TOD_ADJUST_COUNT, val, !val,
					 TOD_ADJUST_INTERVAL_US, TOD_ADJUST_MAX_US);
}

/*
 * A coarse ToD HW clock offset adjustment. The coarse time adjustment is
 * performed by adding or subtracting the delta value to or from the current
 * ToD HW clock time.
 */
static int coarse_adjust_tod_clock(struct dfl_tod *dt, s64 delta)
{
	u32 seconds_msb, seconds_lsb, nanosec;
	void __iomem *base = dt->tod_ctrl;
	u64 seconds, now;

	if (delta == 0)
		return 0;
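
	/* Reading TOD_NANOSEC first snapshots the whole timestamp in hardware */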
	nanosec = readl(base + TOD_NANOSEC);
	seconds_lsb = readl(base + TOD_SECONDSL);
	seconds_msb = readl(base + TOD_SECONDSH);

	/* Calculate new time */
	seconds = CAL_SECONDS(seconds_msb, seconds_lsb);
	now = seconds * NSEC_PER_SEC + nanosec + delta;

	seconds = div_u64_rem(now, NSEC_PER_SEC, &nanosec);
	seconds_msb = FIELD_GET(SECONDS_MSB, seconds);
	seconds_lsb = FIELD_GET(SECONDS_LSB, seconds);

	writel(seconds_msb, base + TOD_SECONDSH);
	writel(seconds_lsb, base + TOD_SECONDSL);
	writel(nanosec, base + TOD_NANOSEC);

	return 0;
}

static int dfl_tod_adjust_fine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct dfl_tod *dt = container_of(ptp, struct dfl_tod, ptp_clock_ops);
	u32 tod_period, tod_rem, tod_drift_adjust_fns, tod_drift_adjust_rate;
	void __iomem *base = dt->tod_ctrl;
	unsigned long flags, rate;
	u64 ppb;

	/* Get the clock rate from clock frequency register offset */
	rate = readl(base + TOD_CLK_FREQ);

	/* add GIGA as nominal ppb */
	ppb = scaled_ppm_to_ppb(scaled_ppm) + GIGA;
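
	/*
	 * The period register holds nanoseconds per ToD clock cycle as a
	 * fixed-point value with PERIOD_FRAC_OFFSET fractional bits, so the
	 * nominal period is (ppb << PERIOD_FRAC_OFFSET) / rate.
	 */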
	tod_period = div_u64_rem(ppb << PERIOD_FRAC_OFFSET, rate, &tod_rem);
	if (tod_period > TOD_PERIOD_MAX)
		return -ERANGE;

	/*
	 * The drift of the ToD clock is adjusted periodically by adding a
	 * drift_adjust_fns correction value every drift_adjust_rate count of
	 * clock cycles.
	 */
	tod_drift_adjust_fns = tod_rem / gcd(tod_rem, rate);
	tod_drift_adjust_rate = rate / gcd(tod_rem, rate);

	while ((tod_drift_adjust_fns > TOD_DRIFT_ADJUST_FNS_MAX) ||
	       (tod_drift_adjust_rate > TOD_DRIFT_ADJUST_RATE_MAX)) {
		tod_drift_adjust_fns >>= 1;
		tod_drift_adjust_rate >>= 1;
	}

	if (tod_drift_adjust_fns == 0)
		tod_drift_adjust_rate = 0;

	spin_lock_irqsave(&dt->tod_lock, flags);
	writel(tod_period, base + TOD_PERIOD);
	writel(0, base + TOD_ADJUST_PERIOD);
	writel(0, base + TOD_ADJUST_COUNT);
	writel(tod_drift_adjust_fns, base + TOD_DRIFT_ADJUST);
	writel(tod_drift_adjust_rate, base + TOD_DRIFT_ADJUST_RATE);
	spin_unlock_irqrestore(&dt->tod_lock, flags);

	return 0;
}

static int dfl_tod_adjust_time(struct ptp_clock_info *ptp, s64 delta)
{
	struct dfl_tod *dt = container_of(ptp, struct dfl_tod, ptp_clock_ops);
	u32 period, diff, rem, rem_period, adj_period;
	void __iomem *base = dt->tod_ctrl;
	unsigned long flags;
	bool neg_adj;
	u64 count;
	int ret;

	neg_adj = delta < 0;
	if (neg_adj)
		delta = -delta;

	spin_lock_irqsave(&dt->tod_lock, flags);

	/*
	 * Get the maximum possible value of the Period register offset
	 * adjustment in nanoseconds scale. This depends on the current
	 * Period register setting and the maximum and minimum possible
	 * values of the Period register.
	 */
	period = readl(base + TOD_PERIOD);
	if (neg_adj) {
		diff = (period - TOD_PERIOD_MIN) >> PERIOD_FRAC_OFFSET;
		adj_period = period - (diff << PERIOD_FRAC_OFFSET);
		count = div_u64_rem(delta, diff, &rem);
		rem_period = period - (rem << PERIOD_FRAC_OFFSET);
	} else {
		diff = (TOD_PERIOD_MAX - period) >> PERIOD_FRAC_OFFSET;
		adj_period = period + (diff << PERIOD_FRAC_OFFSET);
		count = div_u64_rem(delta, diff, &rem);
		rem_period = period + (rem << PERIOD_FRAC_OFFSET);
	}

	ret = 0;
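
	/*
	 * Each fine-adjusted cycle moves the clock by an extra diff nanoseconds,
	 * so the delta is applied as count cycles at adj_period plus one cycle
	 * at rem_period for the remainder. If count does not fit in the hardware
	 * counter, fall back to a coarse adjustment of the timestamp registers.
	 */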
	if (count > TOD_ADJUST_COUNT_MAX) {
		ret = coarse_adjust_tod_clock(dt, neg_adj ? -delta : delta);
	} else {
		/* Adjust the period by count cycles to adjust the time */
		if (count)
			ret = fine_adjust_tod_clock(dt, adj_period, count);

		/* If there is a remainder, adjust the period for an additional cycle */
		if (rem)
			ret = fine_adjust_tod_clock(dt, rem_period, 1);
	}
	spin_unlock_irqrestore(&dt->tod_lock, flags);

	return ret;
}

static int dfl_tod_get_timex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct dfl_tod *dt = container_of(ptp, struct dfl_tod, ptp_clock_ops);
	u32 seconds_msb, seconds_lsb, nanosec;
	void __iomem *base = dt->tod_ctrl;
	unsigned long flags;
	u64 seconds;

	spin_lock_irqsave(&dt->tod_lock, flags);
	ptp_read_system_prets(sts);
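	/* Reading TOD_NANOSEC snapshots the timestamp; SECONDSL/SECONDSH must follow */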
	nanosec = readl(base + TOD_NANOSEC);
	seconds_lsb = readl(base + TOD_SECONDSL);
	seconds_msb = readl(base + TOD_SECONDSH);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&dt->tod_lock, flags);

	seconds = CAL_SECONDS(seconds_msb, seconds_lsb);

	ts->tv_nsec = nanosec;
	ts->tv_sec = seconds;

	return 0;
}

static int dfl_tod_set_time(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct dfl_tod *dt = container_of(ptp, struct dfl_tod, ptp_clock_ops);
	u32 seconds_msb = FIELD_GET(SECONDS_MSB, ts->tv_sec);
	u32 seconds_lsb = FIELD_GET(SECONDS_LSB, ts->tv_sec);
	u32 nanosec = FIELD_GET(SECONDS_LSB, ts->tv_nsec);
	void __iomem *base = dt->tod_ctrl;
	unsigned long flags;

	spin_lock_irqsave(&dt->tod_lock, flags);
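	/* Write in reverse of the read order; writing TOD_NANOSEC last commits the new time */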
	writel(seconds_msb, base + TOD_SECONDSH);
	writel(seconds_lsb, base + TOD_SECONDSL);
	writel(nanosec, base + TOD_NANOSEC);
	spin_unlock_irqrestore(&dt->tod_lock, flags);

	return 0;
}

static struct ptp_clock_info dfl_tod_clock_ops = {
	.owner = THIS_MODULE,
	.name = "dfl_tod",
	.max_adj = TOD_MAX_ADJ,
	.adjfine = dfl_tod_adjust_fine,
	.adjtime = dfl_tod_adjust_time,
	.gettimex64 = dfl_tod_get_timex,
	.settime64 = dfl_tod_set_time,
};

static int dfl_tod_probe(struct dfl_device *ddev)
{
	struct device *dev = &ddev->dev;
	struct dfl_tod *dt;

	dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	dt->tod_ctrl = devm_ioremap_resource(dev, &ddev->mmio_res);
	if (IS_ERR(dt->tod_ctrl))
		return PTR_ERR(dt->tod_ctrl);

	dt->dev = dev;
	spin_lock_init(&dt->tod_lock);
	dev_set_drvdata(dev, dt);

	dt->ptp_clock_ops = dfl_tod_clock_ops;
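
	/* Register the PTP clock; all other resources above are devm-managed */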
	dt->ptp_clock = ptp_clock_register(&dt->ptp_clock_ops, dev);
	if (IS_ERR(dt->ptp_clock))
		return dev_err_probe(dt->dev, PTR_ERR(dt->ptp_clock),
				     "Unable to register PTP clock\n");

	return 0;
}

static void dfl_tod_remove(struct dfl_device *ddev)
{
	struct dfl_tod *dt = dev_get_drvdata(&ddev->dev);

	ptp_clock_unregister(dt->ptp_clock);
}

static const struct dfl_device_id dfl_tod_ids[] = {
	{ FME_ID, FME_FEATURE_ID_TOD },
	{ }
};
MODULE_DEVICE_TABLE(dfl, dfl_tod_ids);

static struct dfl_driver dfl_tod_driver = {
	.drv = {
		.name = "dfl-tod",
	},
	.id_table = dfl_tod_ids,
	.probe = dfl_tod_probe,
	.remove = dfl_tod_remove,
};
module_dfl_driver(dfl_tod_driver);

MODULE_DESCRIPTION("FPGA DFL ToD driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");