// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
  7. #include <linux/device.h>
  8. #include <linux/err.h>
  9. #include <linux/init.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/posix-clock.h>
  13. #include <linux/pps_kernel.h>
  14. #include <linux/slab.h>
  15. #include <linux/syscalls.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/debugfs.h>
  18. #include <linux/xarray.h>
  19. #include <uapi/linux/sched/types.h>
  20. #include "ptp_private.h"
/* Upper bound on programmable alarms a driver may advertise (checked in
 * ptp_clock_register()).
 */
#define PTP_MAX_ALARMS 4
/* PPS source defaults used when registering a PPS source for a clock. */
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
/* Event type forwarded to the PPS layer from ptp_clock_event(). */
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
/* Device class backing /sys/class/ptp; per-device attributes come from
 * ptp_groups (declared in ptp_private.h).
 */
const struct class ptp_class = {
	.name = "ptp",
	.dev_groups = ptp_groups
};
/* private globals */

static dev_t ptp_devt;	/* base of the char device region reserved in ptp_init() */
static DEFINE_XARRAY_ALLOC(ptp_clocks_map);	/* clock index -> struct ptp_clock */
  32. /* time stamp event queue operations */
  33. static inline int queue_free(struct timestamp_event_queue *q)
  34. {
  35. return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
  36. }
/*
 * Convert an external timestamp (PTP_CLOCK_EXTTS) or external offset
 * (PTP_CLOCK_EXTOFF) event into a ptp_extts_event and append it to
 * @queue.  When the queue is full, the oldest entry is overwritten.
 */
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	struct timespec64 offset_ts;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	if (src->type == PTP_CLOCK_EXTTS) {
		/* Raw nanosecond timestamp: split into seconds + remainder. */
		seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
	} else if (src->type == PTP_CLOCK_EXTOFF) {
		/* Offset events carry a signed nanosecond offset instead. */
		offset_ts = ns_to_timespec64(src->offset);
		seconds = offset_ts.tv_sec;
		remainder = offset_ts.tv_nsec;
	} else {
		WARN(1, "%s: unknown type %d\n", __func__, src->type);
		return;
	}

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->flags = PTP_EXTTS_EVENT_VALID;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;
	if (src->type == PTP_CLOCK_EXTOFF)
		dst->flags |= PTP_EXT_OFFSET;

	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
	if (!queue_free(queue))
		/* Queue full: drop the oldest event by advancing head. */
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

	spin_unlock_irqrestore(&queue->lock, flags);
}
  69. /* posix clock implementation */
  70. static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
  71. {
  72. tp->tv_sec = 0;
  73. tp->tv_nsec = 1;
  74. return 0;
  75. }
  76. static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
  77. {
  78. struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
  79. if (ptp_clock_freerun(ptp)) {
  80. pr_err("ptp: physical clock is free running\n");
  81. return -EBUSY;
  82. }
  83. return ptp->info->settime64(ptp->info, tp);
  84. }
  85. static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
  86. {
  87. struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
  88. int err;
  89. if (ptp->info->gettimex64)
  90. err = ptp->info->gettimex64(ptp->info, tp, NULL);
  91. else
  92. err = ptp->info->gettime64(ptp->info, tp);
  93. return err;
  94. }
/*
 * Dispatch a clock_adjtime() call to the driver.  Exactly one mode is
 * handled per call, tested in this order: ADJ_SETOFFSET (step the clock
 * by a delta), ADJ_FREQUENCY (dial a frequency), ADJ_OFFSET (phase
 * adjustment, only if the driver implements adjphase), and modes == 0
 * (read back the last dialed frequency).  Anything else returns
 * -EOPNOTSUPP; a free-running physical clock refuses with -EBUSY.
 */
static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	if (ptp_clock_freerun(ptp)) {
		/* A virtual clock owns the hardware; refuse adjustments. */
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec64 ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec = tx->time.tv_sec;
		ts.tv_nsec = tx->time.tv_usec;

		/* Without ADJ_NANO, tv_usec holds microseconds. */
		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec64_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);
	} else if (tx->modes & ADJ_FREQUENCY) {
		long ppb = scaled_ppm_to_ppb(tx->freq);

		/* Range-check in ppb against the driver's advertised limit. */
		if (ppb > ops->max_adj || ppb < -ops->max_adj)
			return -ERANGE;
		err = ops->adjfine(ops, tx->freq);
		if (!err)
			/* Remembered so that a modes == 0 query reports it. */
			ptp->dialed_frequency = tx->freq;
	} else if (tx->modes & ADJ_OFFSET) {
		if (ops->adjphase) {
			s32 max_phase_adj = ops->getmaxphase(ops);
			s32 offset = tx->offset;

			if (!(tx->modes & ADJ_NANO))
				offset *= NSEC_PER_USEC;

			if (offset > max_phase_adj || offset < -max_phase_adj)
				return -ERANGE;

			err = ops->adjphase(ops, offset);
		}
	} else if (tx->modes == 0) {
		/* Pure query: report the last dialed frequency. */
		tx->freq = ptp->dialed_frequency;
		err = 0;
	}

	return err;
}
/* Callbacks implementing the dynamic POSIX clock exposed for each PTP
 * device; the chardev-style ops (ioctl/open/poll/read) live in other
 * files of this driver (see ptp_private.h).
 */
static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.release	= ptp_release,
	.poll		= ptp_poll,
	.read		= ptp_read,
};
/*
 * Device ->release callback: runs once the last reference to the ptp
 * device is dropped.  Frees everything ptp_clock_register() allocated,
 * including the default (first) timestamp event queue, and returns the
 * clock's index to the xarray.
 */
static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
	struct timestamp_event_queue *tsevq;
	unsigned long flags;

	ptp_cleanup_pin_groups(ptp);
	kfree(ptp->vclock_index);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);

	/* Delete first entry */
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
				 qlist);
	list_del(&tsevq->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	/* Free outside the lock; the entry is already unlinked. */
	bitmap_free(tsevq->mask);
	kfree(tsevq);

	debugfs_remove(ptp->debugfs_root);
	xa_erase(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}
  174. static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
  175. {
  176. if (info->getcyclesx64)
  177. return info->getcyclesx64(info, ts, NULL);
  178. else
  179. return info->gettime64(info, ts);
  180. }
/*
 * Default ->enable callback, installed when a driver provides none:
 * reject every ancillary feature request.
 */
static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}
  185. static void ptp_aux_kworker(struct kthread_work *work)
  186. {
  187. struct ptp_clock *ptp = container_of(work, struct ptp_clock,
  188. aux_work.work);
  189. struct ptp_clock_info *info = ptp->info;
  190. long delay;
  191. delay = info->do_aux_work(info);
  192. if (delay >= 0)
  193. kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
  194. }
/* public interface */

/*
 * Create and register a new PTP clock described by @info, parented to
 * @parent.  The clock is exposed to userspace as a dynamic POSIX clock
 * (/dev/ptpN) plus a class device under /sys/class/ptp; optionally a
 * PPS source, an auxiliary kthread worker, and a debugfs directory are
 * created along the way.
 *
 * NOTE(review): several @info callbacks (getcycles64, getcyclesx64,
 * getcrosscycles, enable) are patched in place with defaults below, so
 * @info must remain valid for the clock's lifetime.
 *
 * Returns the new clock on success, or an ERR_PTR() on failure.
 */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	struct timestamp_event_queue *queue = NULL;
	int err, index, major = MAJOR(ptp_devt);
	char debugfsname[16];
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto no_memory;
	}

	/* Reserve an index; it becomes the N in /dev/ptpN. */
	err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
		       GFP_KERNEL);
	if (err)
		goto no_slot;

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	INIT_LIST_HEAD(&ptp->tsevqs);

	/* Default event queue; it is freed in ptp_clock_release(). */
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		err = -ENOMEM;
		goto no_memory_queue;
	}
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_lock_init(&ptp->tsevqs_lock);
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		err = -ENOMEM;
		goto no_memory_bitmap;
	}
	/* The default queue listens on all channels. */
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	/* Fill in missing cycle-counter callbacks with defaults. */
	if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
		ptp->has_cycles = true;
		if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
			ptp->info->getcycles64 = ptp_getcycles64;
	} else {
		/* Free running cycle counter not supported, use time. */
		ptp->info->getcycles64 = ptp_getcycles64;

		if (ptp->info->gettimex64)
			ptp->info->getcyclesx64 = ptp->info->gettimex64;

		if (ptp->info->getcrosststamp)
			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
	}

	if (!ptp->info->enable)
		ptp->info->enable = ptp_enable;

	/* Drivers with periodic housekeeping get a dedicated kworker. */
	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	/* Only physical clocks can carry virtual clocks. */
	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = &ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		/*
		 * Past device_initialize() the ->release callback owns
		 * teardown, so undo PPS/kworker here and then just drop
		 * the device reference instead of freeing directly.
		 */
		if (ptp->pps_source)
			pps_unregister_source(ptp->pps_source);

		if (ptp->kworker)
			kthread_destroy_worker(ptp->kworker);

		put_device(&ptp->dev);

		pr_err("failed to create posix clock\n");
		return ERR_PTR(err);
	}

	/* Debugfs initialization */
	snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
	ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);

	return ptp;

/* Error unwinding: each label undoes the steps above it, in reverse order. */
no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	bitmap_free(queue->mask);
no_memory_bitmap:
	list_del(&queue->qlist);
	kfree(queue);
no_memory_queue:
	xa_erase(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);
/* device_for_each_child() helper: tear down one stacked virtual clock. */
static int unregister_vclock(struct device *dev, void *data)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);

	ptp_vclock_unregister(info_to_vclock(ptp->info));
	return 0;
}
/*
 * Remove a clock registered with ptp_clock_register().  Virtual clocks
 * stacked on this physical clock are unregistered first; then readers
 * blocked in poll/read are woken (they observe ->defunct), the aux
 * worker is stopped, and the PPS source and POSIX clock are released.
 * Final memory teardown happens in ptp_clock_release() when the last
 * device reference drops.
 */
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	if (ptp_vclock_in_use(ptp)) {
		device_for_each_child(&ptp->dev, NULL, unregister_vclock);
	}

	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
/*
 * Deliver a driver event to consumers.  External timestamp/offset
 * events are queued on every event queue whose channel mask includes
 * the event's channel, then readers are woken; PPS events are forwarded
 * to the clock's registered PPS source.  The irqsave locking makes the
 * EXTTS path usable from hard-IRQ context.
 */
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct timestamp_event_queue *tsevq;
	struct pps_event_time evt;
	unsigned long flags;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		/* Alarms have no fan-out here. */
		break;

	case PTP_CLOCK_EXTTS:
	case PTP_CLOCK_EXTOFF:
		/* Enqueue timestamp on selected queues */
		spin_lock_irqsave(&ptp->tsevqs_lock, flags);
		list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
			if (test_bit((unsigned int)event->index, tsevq->mask))
				enqueue_external_timestamp(tsevq, event);
		}
		spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		/* Timestamp the PPS edge now, in the kernel. */
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		/* Driver supplied its own event timestamps. */
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);
/* Return the clock's index — the N in its "ptp%d" device name. */
int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);
  397. int ptp_find_pin(struct ptp_clock *ptp,
  398. enum ptp_pin_function func, unsigned int chan)
  399. {
  400. struct ptp_pin_desc *pin = NULL;
  401. int i;
  402. for (i = 0; i < ptp->info->n_pins; i++) {
  403. if (ptp->info->pin_config[i].func == func &&
  404. ptp->info->pin_config[i].chan == chan) {
  405. pin = &ptp->info->pin_config[i];
  406. break;
  407. }
  408. }
  409. return pin ? i : -1;
  410. }
  411. EXPORT_SYMBOL(ptp_find_pin);
  412. int ptp_find_pin_unlocked(struct ptp_clock *ptp,
  413. enum ptp_pin_function func, unsigned int chan)
  414. {
  415. int result;
  416. mutex_lock(&ptp->pincfg_mux);
  417. result = ptp_find_pin(ptp, func, chan);
  418. mutex_unlock(&ptp->pincfg_mux);
  419. return result;
  420. }
  421. EXPORT_SYMBOL(ptp_find_pin_unlocked);
/*
 * Schedule (or reschedule) the clock's auxiliary work to run after
 * @delay.  Only meaningful for drivers that supplied ->do_aux_work,
 * since ptp->kworker is created only for those (see ptp_clock_register()).
 */
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);
/* Cancel pending auxiliary work and wait for a running callback to finish. */
void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);
/* module operations */

/* Module unload: undo everything ptp_init() set up. */
static void __exit ptp_exit(void)
{
	class_unregister(&ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	xa_destroy(&ptp_clocks_map);
}
  439. static int __init ptp_init(void)
  440. {
  441. int err;
  442. err = class_register(&ptp_class);
  443. if (err) {
  444. pr_err("ptp: failed to allocate class\n");
  445. return err;
  446. }
  447. err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
  448. if (err < 0) {
  449. pr_err("ptp: failed to allocate device region\n");
  450. goto no_region;
  451. }
  452. pr_info("PTP clock support registered\n");
  453. return 0;
  454. no_region:
  455. class_unregister(&ptp_class);
  456. return err;
  457. }
/* NOTE(review): subsys_initcall is used (not module_init), presumably so
 * the PTP class exists before clock drivers' own initcalls run — confirm.
 */
subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");