// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/xarray.h>
#include <uapi/linux/sched/types.h>

#include "ptp_private.h"

#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)

const struct class ptp_class = {
        .name = "ptp",
        .dev_groups = ptp_groups
};
/* private globals */

static dev_t ptp_devt;

static DEFINE_XARRAY_ALLOC(ptp_clocks_map);

/* time stamp event queue operations */

static inline int queue_free(struct timestamp_event_queue *q)
{
        return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}

static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
                                       struct ptp_clock_event *src)
{
        struct ptp_extts_event *dst;
        struct timespec64 offset_ts;
        unsigned long flags;
        s64 seconds;
        u32 remainder;

        if (src->type == PTP_CLOCK_EXTTS) {
                seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
        } else if (src->type == PTP_CLOCK_EXTOFF) {
                offset_ts = ns_to_timespec64(src->offset);
                seconds = offset_ts.tv_sec;
                remainder = offset_ts.tv_nsec;
        } else {
                WARN(1, "%s: unknown type %d\n", __func__, src->type);
                return;
        }

        spin_lock_irqsave(&queue->lock, flags);

        dst = &queue->buf[queue->tail];
        dst->index = src->index;
        dst->flags = PTP_EXTTS_EVENT_VALID;
        dst->t.sec = seconds;
        dst->t.nsec = remainder;
        if (src->type == PTP_CLOCK_EXTOFF)
                dst->flags |= PTP_EXT_OFFSET;

        /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
        if (!queue_free(queue))
                WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

        WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

        spin_unlock_irqrestore(&queue->lock, flags);
}
/* posix clock implementation */

static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = 1;
        return 0;
}

static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

        if (ptp_clock_freerun(ptp)) {
                pr_err_ratelimited("ptp: physical clock is free running\n");
                return -EBUSY;
        }

        return ptp->info->settime64(ptp->info, tp);
}

static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        int err;

        if (ptp->info->gettimex64)
                err = ptp->info->gettimex64(ptp->info, tp, NULL);
        else
                err = ptp->info->gettime64(ptp->info, tp);
        return err;
}

static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        struct ptp_clock_info *ops;
        int err = -EOPNOTSUPP;

        if (tx->modes & (ADJ_SETOFFSET | ADJ_FREQUENCY | ADJ_OFFSET) &&
            ptp_clock_freerun(ptp)) {
                pr_err("ptp: physical clock is free running\n");
                return -EBUSY;
        }

        ops = ptp->info;

        if (tx->modes & ADJ_SETOFFSET) {
                struct timespec64 ts;
                ktime_t kt;
                s64 delta;

                ts.tv_sec = tx->time.tv_sec;
                ts.tv_nsec = tx->time.tv_usec;

                if (!(tx->modes & ADJ_NANO))
                        ts.tv_nsec *= 1000;

                if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
                        return -EINVAL;

                kt = timespec64_to_ktime(ts);
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
        } else if (tx->modes & ADJ_FREQUENCY) {
                long ppb = scaled_ppm_to_ppb(tx->freq);

                if (ppb > ops->max_adj || ppb < -ops->max_adj)
                        return -ERANGE;
                err = ops->adjfine(ops, tx->freq);
                if (!err)
                        ptp->dialed_frequency = tx->freq;
        } else if (tx->modes & ADJ_OFFSET) {
                if (ops->adjphase) {
                        s32 max_phase_adj = ops->getmaxphase(ops);
                        s32 offset = tx->offset;

                        if (!(tx->modes & ADJ_NANO))
                                offset *= NSEC_PER_USEC;

                        if (offset > max_phase_adj || offset < -max_phase_adj)
                                return -ERANGE;

                        err = ops->adjphase(ops, offset);
                }
        } else if (tx->modes == 0) {
                tx->freq = ptp->dialed_frequency;
                err = 0;
        }

        return err;
}

static struct posix_clock_operations ptp_clock_ops = {
        .owner          = THIS_MODULE,
        .clock_adjtime  = ptp_clock_adjtime,
        .clock_gettime  = ptp_clock_gettime,
        .clock_getres   = ptp_clock_getres,
        .clock_settime  = ptp_clock_settime,
        .ioctl          = ptp_ioctl,
        .open           = ptp_open,
        .release        = ptp_release,
        .poll           = ptp_poll,
        .read           = ptp_read,
};
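
/*
 * Example (sketch, not part of the original source): these operations back a
 * dynamic posix clock, so user space reaches them through the /dev/ptpN
 * character device.  The usual pattern, as in the kernel's testptp tool, is
 * to derive a clockid_t from the open file descriptor:
 *
 *      #define CLOCKFD 3
 *      #define FD_TO_CLOCKID(fd)  ((~(clockid_t) (fd) << 3) | CLOCKFD)
 *
 *      int fd = open("/dev/ptp0", O_RDWR);
 *      clockid_t clkid = FD_TO_CLOCKID(fd);
 *      struct timespec ts;
 *      struct timex tx = { 0 };
 *
 *      clock_gettime(clkid, &ts);   // serviced by ptp_clock_gettime()
 *      clock_adjtime(clkid, &tx);   // serviced by ptp_clock_adjtime()
 */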
static void ptp_clock_release(struct device *dev)
{
        struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
        struct timestamp_event_queue *tsevq;
        unsigned long flags;

        ptp_cleanup_pin_groups(ptp);
        kfree(ptp->vclock_index);
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
        /* Delete first entry */
        spin_lock_irqsave(&ptp->tsevqs_lock, flags);
        tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
                                 qlist);
        list_del(&tsevq->qlist);
        spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
        bitmap_free(tsevq->mask);
        kfree(tsevq);
        debugfs_remove(ptp->debugfs_root);
        xa_erase(&ptp_clocks_map, ptp->index);
        kfree(ptp);
}

static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
{
        if (info->getcyclesx64)
                return info->getcyclesx64(info, ts, NULL);
        else
                return info->gettime64(info, ts);
}

static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
{
        return -EOPNOTSUPP;
}

static void ptp_aux_kworker(struct kthread_work *work)
{
        struct ptp_clock *ptp = container_of(work, struct ptp_clock,
                                             aux_work.work);
        struct ptp_clock_info *info = ptp->info;
        long delay;

        delay = info->do_aux_work(info);

        if (delay >= 0)
                kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
/* public interface */

struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                                     struct device *parent)
{
        struct ptp_clock *ptp;
        struct timestamp_event_queue *queue = NULL;
        int err, index, major = MAJOR(ptp_devt);
        char debugfsname[16];
        size_t size;

        if (info->n_alarm > PTP_MAX_ALARMS)
                return ERR_PTR(-EINVAL);

        /* Initialize a clock structure. */
        ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
        if (!ptp) {
                err = -ENOMEM;
                goto no_memory;
        }

        err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
                       GFP_KERNEL);
        if (err)
                goto no_slot;

        ptp->clock.ops = ptp_clock_ops;
        ptp->info = info;
        ptp->devid = MKDEV(major, index);
        ptp->index = index;
        INIT_LIST_HEAD(&ptp->tsevqs);
        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                err = -ENOMEM;
                goto no_memory_queue;
        }
        list_add_tail(&queue->qlist, &ptp->tsevqs);
        spin_lock_init(&ptp->tsevqs_lock);
        queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
        if (!queue->mask) {
                err = -ENOMEM;
                goto no_memory_bitmap;
        }
        bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
        spin_lock_init(&queue->lock);
        mutex_init(&ptp->pincfg_mux);
        mutex_init(&ptp->n_vclocks_mux);
        init_waitqueue_head(&ptp->tsev_wq);

        if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
                ptp->has_cycles = true;
                if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
                        ptp->info->getcycles64 = ptp_getcycles64;
        } else {
                /* Free running cycle counter not supported, use time. */
                ptp->info->getcycles64 = ptp_getcycles64;

                if (ptp->info->gettimex64)
                        ptp->info->getcyclesx64 = ptp->info->gettimex64;

                if (ptp->info->getcrosststamp)
                        ptp->info->getcrosscycles = ptp->info->getcrosststamp;
        }

        if (!ptp->info->enable)
                ptp->info->enable = ptp_enable;

        if (ptp->info->do_aux_work) {
                kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
                ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
                if (IS_ERR(ptp->kworker)) {
                        err = PTR_ERR(ptp->kworker);
                        pr_err("failed to create ptp aux_worker %d\n", err);
                        goto kworker_err;
                }
        }

        /* PTP virtual clock is being registered under physical clock */
        if (parent && parent->class && parent->class->name &&
            strcmp(parent->class->name, "ptp") == 0)
                ptp->is_virtual_clock = true;

        if (!ptp->is_virtual_clock) {
                ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

                size = sizeof(int) * ptp->max_vclocks;
                ptp->vclock_index = kzalloc(size, GFP_KERNEL);
                if (!ptp->vclock_index) {
                        err = -ENOMEM;
                        goto no_mem_for_vclocks;
                }
        }

        err = ptp_populate_pin_groups(ptp);
        if (err)
                goto no_pin_groups;

        /* Register a new PPS source. */
        if (info->pps) {
                struct pps_source_info pps;

                memset(&pps, 0, sizeof(pps));
                snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
                pps.mode = PTP_PPS_MODE;
                pps.owner = info->owner;
                ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
                if (IS_ERR(ptp->pps_source)) {
                        err = PTR_ERR(ptp->pps_source);
                        pr_err("failed to register pps source\n");
                        goto no_pps;
                }
                ptp->pps_source->lookup_cookie = ptp;
        }

        /* Initialize a new device of our class in our clock structure. */
        device_initialize(&ptp->dev);
        ptp->dev.devt = ptp->devid;
        ptp->dev.class = &ptp_class;
        ptp->dev.parent = parent;
        ptp->dev.groups = ptp->pin_attr_groups;
        ptp->dev.release = ptp_clock_release;
        dev_set_drvdata(&ptp->dev, ptp);
        dev_set_name(&ptp->dev, "ptp%d", ptp->index);

        /* Create a posix clock and link it to the device. */
        err = posix_clock_register(&ptp->clock, &ptp->dev);
        if (err) {
                if (ptp->pps_source)
                        pps_unregister_source(ptp->pps_source);

                if (ptp->kworker)
                        kthread_destroy_worker(ptp->kworker);

                put_device(&ptp->dev);

                pr_err("failed to create posix clock\n");
                return ERR_PTR(err);
        }

        /* Debugfs initialization */
        snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
        ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);

        return ptp;

no_pps:
        ptp_cleanup_pin_groups(ptp);
no_pin_groups:
        kfree(ptp->vclock_index);
no_mem_for_vclocks:
        if (ptp->kworker)
                kthread_destroy_worker(ptp->kworker);
kworker_err:
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
        bitmap_free(queue->mask);
no_memory_bitmap:
        list_del(&queue->qlist);
        kfree(queue);
no_memory_queue:
        xa_erase(&ptp_clocks_map, index);
no_slot:
        kfree(ptp);
no_memory:
        return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);
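
/*
 * Example (sketch, not part of the original source): a hypothetical driver
 * "foo" would typically register its clock from probe().  The names
 * foo_ptp_info, foo_adjfine, foo_adjtime, foo_gettime64 and foo_settime64
 * are invented for illustration; only the ptp_clock_info fields and
 * ptp_clock_register() itself are real API.
 *
 *      static struct ptp_clock_info foo_ptp_info = {
 *              .owner          = THIS_MODULE,
 *              .name           = "foo PHC",
 *              .max_adj        = 500000000,
 *              .adjfine        = foo_adjfine,
 *              .adjtime        = foo_adjtime,
 *              .gettime64      = foo_gettime64,
 *              .settime64      = foo_settime64,
 *      };
 *
 *      foo->ptp_clock = ptp_clock_register(&foo_ptp_info, &pdev->dev);
 *      if (IS_ERR(foo->ptp_clock))
 *              return PTR_ERR(foo->ptp_clock);
 *
 * The header stub returns NULL when PTP clock support is not reachable in
 * the kernel configuration, so callers often also handle a NULL result.
 */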
static int unregister_vclock(struct device *dev, void *data)
{
        struct ptp_clock *ptp = dev_get_drvdata(dev);

        ptp_vclock_unregister(info_to_vclock(ptp->info));
        return 0;
}

int ptp_clock_unregister(struct ptp_clock *ptp)
{
        if (ptp_vclock_in_use(ptp)) {
                device_for_each_child(&ptp->dev, NULL, unregister_vclock);
        }

        ptp->defunct = 1;
        wake_up_interruptible(&ptp->tsev_wq);

        if (ptp->kworker) {
                kthread_cancel_delayed_work_sync(&ptp->aux_work);
                kthread_destroy_worker(ptp->kworker);
        }

        /* Release the clock's resources. */
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);

        posix_clock_unregister(&ptp->clock);

        return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
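
/*
 * Example (sketch): the matching teardown in the hypothetical driver's
 * remove() path, mirroring the registration sketch above:
 *
 *      if (foo->ptp_clock) {
 *              ptp_clock_unregister(foo->ptp_clock);
 *              foo->ptp_clock = NULL;
 *      }
 */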
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
        struct timestamp_event_queue *tsevq;
        struct pps_event_time evt;
        unsigned long flags;

        switch (event->type) {

        case PTP_CLOCK_ALARM:
                break;

        case PTP_CLOCK_EXTTS:
        case PTP_CLOCK_EXTOFF:
                /* Enqueue timestamp on selected queues */
                spin_lock_irqsave(&ptp->tsevqs_lock, flags);
                list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
                        if (test_bit((unsigned int)event->index, tsevq->mask))
                                enqueue_external_timestamp(tsevq, event);
                }
                spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
                wake_up_interruptible(&ptp->tsev_wq);
                break;

        case PTP_CLOCK_PPS:
                pps_get_ts(&evt);
                pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
                break;

        case PTP_CLOCK_PPSUSR:
                pps_event(ptp->pps_source, &event->pps_times,
                          PTP_PPS_EVENT, NULL);
                break;
        }
}
EXPORT_SYMBOL(ptp_clock_event);
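
/*
 * Example (sketch): how a hypothetical driver might feed an external
 * timestamp captured by its hardware into the class layer, typically from
 * its interrupt handler.  foo_read_extts_ns() is an invented helper that
 * returns the captured time in nanoseconds.
 *
 *      struct ptp_clock_event event;
 *
 *      event.type = PTP_CLOCK_EXTTS;
 *      event.index = 0;
 *      event.timestamp = foo_read_extts_ns(foo);
 *      ptp_clock_event(foo->ptp_clock, &event);
 *
 * PTP_CLOCK_PPS events need no timestamp (the class layer takes one via
 * pps_get_ts()), while PTP_CLOCK_PPSUSR carries its own pps_times.
 */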
int ptp_clock_index(struct ptp_clock *ptp)
{
        return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);
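
/*
 * Example (sketch): network drivers usually report this index to user space
 * from their ethtool .get_ts_info() callback, so that tools such as ptp4l
 * can locate the matching /dev/ptpN node for an interface:
 *
 *      info->phc_index = ptp_clock_index(foo->ptp_clock);
 */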
int ptp_find_pin(struct ptp_clock *ptp,
                 enum ptp_pin_function func, unsigned int chan)
{
        struct ptp_pin_desc *pin = NULL;
        int i;

        for (i = 0; i < ptp->info->n_pins; i++) {
                if (ptp->info->pin_config[i].func == func &&
                    ptp->info->pin_config[i].chan == chan) {
                        pin = &ptp->info->pin_config[i];
                        break;
                }
        }

        return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);
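
/*
 * Example (sketch): a driver's .enable() callback can use ptp_find_pin() to
 * check whether a pin is currently assigned to the requested function and
 * channel before programming the hardware.  foo_program_extts() is an
 * invented helper; callers that do not already hold pincfg_mux can use
 * ptp_find_pin_unlocked() below instead.
 *
 *      case PTP_CLK_REQ_EXTTS:
 *              pin = ptp_find_pin(foo->ptp_clock, PTP_PF_EXTTS,
 *                                 rq->extts.index);
 *              if (pin < 0)
 *                      return -EBUSY;
 *              foo_program_extts(foo, pin, on);
 *              break;
 */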
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
                          enum ptp_pin_function func, unsigned int chan)
{
        int result;

        mutex_lock(&ptp->pincfg_mux);
        result = ptp_find_pin(ptp, func, chan);
        mutex_unlock(&ptp->pincfg_mux);

        return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);

int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
        return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);

void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
        kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);
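
/*
 * Example (sketch): the auxiliary worker is driven by the driver's
 * .do_aux_work() callback.  A hypothetical driver that must poll its clock
 * periodically (for instance to maintain an overflow counter) might do:
 *
 *      static long foo_do_aux_work(struct ptp_clock_info *info)
 *      {
 *              struct foo_priv *foo = container_of(info, struct foo_priv,
 *                                                  ptp_info);
 *
 *              foo_update_overflow_counter(foo);
 *              return msecs_to_jiffies(500);   // run again in 500 ms
 *      }
 *
 * Returning a negative value stops the rescheduling; the driver can restart
 * or reschedule the work at any time with ptp_schedule_worker().
 */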
/* module operations */

static void __exit ptp_exit(void)
{
        class_unregister(&ptp_class);
        unregister_chrdev_region(ptp_devt, MINORMASK + 1);
        xa_destroy(&ptp_clocks_map);
}

static int __init ptp_init(void)
{
        int err;

        err = class_register(&ptp_class);
        if (err) {
                pr_err("ptp: failed to allocate class\n");
                return err;
        }

        err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
        if (err < 0) {
                pr_err("ptp: failed to allocate device region\n");
                goto no_region;
        }

        pr_info("PTP clock support registered\n");
        return 0;

no_region:
        class_unregister(&ptp_class);
        return err;
}

subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");