// SPDX-License-Identifier: GPL-2.0
/*
 * dpll_core.c - DPLL subsystem kernel-space interface implementation.
 *
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
 * Copyright (c) 2023 Intel Corporation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "dpll_core.h"
#include "dpll_netlink.h"

/* Mutex lock to protect DPLL subsystem devices and pins */
DEFINE_MUTEX(dpll_lock);

DEFINE_XARRAY_FLAGS(dpll_device_xa, XA_FLAGS_ALLOC);
DEFINE_XARRAY_FLAGS(dpll_pin_xa, XA_FLAGS_ALLOC);

static u32 dpll_device_xa_id;
static u32 dpll_pin_xa_id;

#define ASSERT_DPLL_REGISTERED(d) \
        WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
        WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_PIN_REGISTERED(p) \
        WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))

struct dpll_device_registration {
        struct list_head list;
        const struct dpll_device_ops *ops;
        void *priv;
};

struct dpll_pin_registration {
        struct list_head list;
        const struct dpll_pin_ops *ops;
        void *priv;
        void *cookie;
};

struct dpll_device *dpll_device_get_by_id(int id)
{
        if (xa_get_mark(&dpll_device_xa, id, DPLL_REGISTERED))
                return xa_load(&dpll_device_xa, id);
        return NULL;
}

static struct dpll_pin_registration *
dpll_pin_registration_find(struct dpll_pin_ref *ref,
                           const struct dpll_pin_ops *ops, void *priv,
                           void *cookie)
{
        struct dpll_pin_registration *reg;

        list_for_each_entry(reg, &ref->registration_list, list) {
                if (reg->ops == ops && reg->priv == priv &&
                    reg->cookie == cookie)
                        return reg;
        }

        return NULL;
}
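
/*
 * Reference tracking between pins and dpll devices: each dpll_pin_ref kept
 * in an xarray points at either a pin or a dpll, and holds a refcount plus
 * a list of (ops, priv, cookie) registrations. The _add helpers below
 * allocate the ref on first use and only bump the refcount for subsequent
 * registrations; the _del helpers drop a registration and free the ref once
 * the refcount reaches zero.
 */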

static int
dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
                    const struct dpll_pin_ops *ops, void *priv,
                    void *cookie)
{
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;
        bool ref_exists = false;
        unsigned long i;
        int ret;

        xa_for_each(xa_pins, i, ref) {
                if (ref->pin != pin)
                        continue;
                reg = dpll_pin_registration_find(ref, ops, priv, cookie);
                if (reg) {
                        refcount_inc(&ref->refcount);
                        return 0;
                }
                ref_exists = true;
                break;
        }

        if (!ref_exists) {
                ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                if (!ref)
                        return -ENOMEM;
                ref->pin = pin;
                INIT_LIST_HEAD(&ref->registration_list);
                ret = xa_insert(xa_pins, pin->pin_idx, ref, GFP_KERNEL);
                if (ret) {
                        kfree(ref);
                        return ret;
                }
                refcount_set(&ref->refcount, 1);
        }

        reg = kzalloc(sizeof(*reg), GFP_KERNEL);
        if (!reg) {
                if (!ref_exists) {
                        xa_erase(xa_pins, pin->pin_idx);
                        kfree(ref);
                }
                return -ENOMEM;
        }
        reg->ops = ops;
        reg->priv = priv;
        reg->cookie = cookie;
        if (ref_exists)
                refcount_inc(&ref->refcount);
        list_add_tail(&reg->list, &ref->registration_list);

        return 0;
}

static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
                               const struct dpll_pin_ops *ops, void *priv,
                               void *cookie)
{
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;
        unsigned long i;

        xa_for_each(xa_pins, i, ref) {
                if (ref->pin != pin)
                        continue;
                reg = dpll_pin_registration_find(ref, ops, priv, cookie);
                if (WARN_ON(!reg))
                        return -EINVAL;
                list_del(&reg->list);
                kfree(reg);
                if (refcount_dec_and_test(&ref->refcount)) {
                        xa_erase(xa_pins, i);
                        WARN_ON(!list_empty(&ref->registration_list));
                        kfree(ref);
                }
                return 0;
        }

        return -EINVAL;
}

static int
dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
                     const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;
        bool ref_exists = false;
        unsigned long i;
        int ret;

        xa_for_each(xa_dplls, i, ref) {
                if (ref->dpll != dpll)
                        continue;
                reg = dpll_pin_registration_find(ref, ops, priv, cookie);
                if (reg) {
                        refcount_inc(&ref->refcount);
                        return 0;
                }
                ref_exists = true;
                break;
        }

        if (!ref_exists) {
                ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                if (!ref)
                        return -ENOMEM;
                ref->dpll = dpll;
                INIT_LIST_HEAD(&ref->registration_list);
                ret = xa_insert(xa_dplls, dpll->id, ref, GFP_KERNEL);
                if (ret) {
                        kfree(ref);
                        return ret;
                }
                refcount_set(&ref->refcount, 1);
        }

        reg = kzalloc(sizeof(*reg), GFP_KERNEL);
        if (!reg) {
                if (!ref_exists) {
                        xa_erase(xa_dplls, dpll->id);
                        kfree(ref);
                }
                return -ENOMEM;
        }
        reg->ops = ops;
        reg->priv = priv;
        reg->cookie = cookie;
        if (ref_exists)
                refcount_inc(&ref->refcount);
        list_add_tail(&reg->list, &ref->registration_list);

        return 0;
}

static void
dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
                     const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;
        unsigned long i;

        xa_for_each(xa_dplls, i, ref) {
                if (ref->dpll != dpll)
                        continue;
                reg = dpll_pin_registration_find(ref, ops, priv, cookie);
                if (WARN_ON(!reg))
                        return;
                list_del(&reg->list);
                kfree(reg);
                if (refcount_dec_and_test(&ref->refcount)) {
                        xa_erase(xa_dplls, i);
                        WARN_ON(!list_empty(&ref->registration_list));
                        kfree(ref);
                }
                return;
        }
}

struct dpll_pin_ref *dpll_xa_ref_dpll_first(struct xarray *xa_refs)
{
        struct dpll_pin_ref *ref;
        unsigned long i = 0;

        ref = xa_find(xa_refs, &i, ULONG_MAX, XA_PRESENT);
        WARN_ON(!ref);
        return ref;
}

static struct dpll_device *
dpll_device_alloc(const u64 clock_id, u32 device_idx, struct module *module)
{
        struct dpll_device *dpll;
        int ret;

        dpll = kzalloc(sizeof(*dpll), GFP_KERNEL);
        if (!dpll)
                return ERR_PTR(-ENOMEM);
        refcount_set(&dpll->refcount, 1);
        INIT_LIST_HEAD(&dpll->registration_list);
        dpll->device_idx = device_idx;
        dpll->clock_id = clock_id;
        dpll->module = module;
        ret = xa_alloc_cyclic(&dpll_device_xa, &dpll->id, dpll, xa_limit_32b,
                              &dpll_device_xa_id, GFP_KERNEL);
        if (ret < 0) {
                kfree(dpll);
                return ERR_PTR(ret);
        }
        xa_init_flags(&dpll->pin_refs, XA_FLAGS_ALLOC);

        return dpll;
}

/**
 * dpll_device_get - find existing or create new dpll device
 * @clock_id: clock_id of creator
 * @device_idx: idx given by device driver
 * @module: reference to registering module
 *
 * Get existing object of a dpll device, unique for given arguments.
 * Create a new one if it doesn't exist yet.
 *
 * Context: Acquires a lock (dpll_lock)
 * Return:
 * * valid dpll_device struct pointer if succeeded
 * * ERR_PTR(X) - error
 */
struct dpll_device *
dpll_device_get(u64 clock_id, u32 device_idx, struct module *module)
{
        struct dpll_device *dpll, *ret = NULL;
        unsigned long index;

        mutex_lock(&dpll_lock);
        xa_for_each(&dpll_device_xa, index, dpll) {
                if (dpll->clock_id == clock_id &&
                    dpll->device_idx == device_idx &&
                    dpll->module == module) {
                        ret = dpll;
                        refcount_inc(&ret->refcount);
                        break;
                }
        }
        if (!ret)
                ret = dpll_device_alloc(clock_id, device_idx, module);
        mutex_unlock(&dpll_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(dpll_device_get);

/**
 * dpll_device_put - decrease the refcount and free memory if possible
 * @dpll: dpll_device struct pointer
 *
 * Drop reference for a dpll device; if all references are gone, delete
 * the dpll device object.
 *
 * Context: Acquires a lock (dpll_lock)
 */
void dpll_device_put(struct dpll_device *dpll)
{
        mutex_lock(&dpll_lock);
        if (refcount_dec_and_test(&dpll->refcount)) {
                ASSERT_DPLL_NOT_REGISTERED(dpll);
                WARN_ON_ONCE(!xa_empty(&dpll->pin_refs));
                xa_destroy(&dpll->pin_refs);
                xa_erase(&dpll_device_xa, dpll->id);
                WARN_ON(!list_empty(&dpll->registration_list));
                kfree(dpll);
        }
        mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_device_put);
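
/*
 * Example (illustrative sketch, not part of this file): a driver typically
 * obtains a dpll device handle during probe and drops it on remove. The
 * clock id and device index below are hypothetical driver-side values.
 *
 *	struct dpll_device *dpll;
 *
 *	dpll = dpll_device_get(my_clock_id, 0, THIS_MODULE);
 *	if (IS_ERR(dpll))
 *		return PTR_ERR(dpll);
 *	...
 *	dpll_device_put(dpll);
 */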

static struct dpll_device_registration *
dpll_device_registration_find(struct dpll_device *dpll,
                              const struct dpll_device_ops *ops, void *priv)
{
        struct dpll_device_registration *reg;

        list_for_each_entry(reg, &dpll->registration_list, list) {
                if (reg->ops == ops && reg->priv == priv)
                        return reg;
        }

        return NULL;
}

/**
 * dpll_device_register - register the dpll device in the subsystem
 * @dpll: pointer to a dpll
 * @type: type of a dpll
 * @ops: ops for a dpll device
 * @priv: pointer to private information of owner
 *
 * Make dpll device available for user space.
 *
 * Context: Acquires a lock (dpll_lock)
 * Return:
 * * 0 on success
 * * negative - error value
 */
int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
                         const struct dpll_device_ops *ops, void *priv)
{
        struct dpll_device_registration *reg;
        bool first_registration = false;

        if (WARN_ON(!ops))
                return -EINVAL;
        if (WARN_ON(!ops->mode_get))
                return -EINVAL;
        if (WARN_ON(!ops->lock_status_get))
                return -EINVAL;
        if (WARN_ON(type < DPLL_TYPE_PPS || type > DPLL_TYPE_MAX))
                return -EINVAL;

        mutex_lock(&dpll_lock);
        reg = dpll_device_registration_find(dpll, ops, priv);
        if (reg) {
                mutex_unlock(&dpll_lock);
                return -EEXIST;
        }

        reg = kzalloc(sizeof(*reg), GFP_KERNEL);
        if (!reg) {
                mutex_unlock(&dpll_lock);
                return -ENOMEM;
        }
        reg->ops = ops;
        reg->priv = priv;
        dpll->type = type;
        first_registration = list_empty(&dpll->registration_list);
        list_add_tail(&reg->list, &dpll->registration_list);
        if (!first_registration) {
                mutex_unlock(&dpll_lock);
                return 0;
        }

        xa_set_mark(&dpll_device_xa, dpll->id, DPLL_REGISTERED);
        dpll_device_create_ntf(dpll);
        mutex_unlock(&dpll_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(dpll_device_register);

/**
 * dpll_device_unregister - unregister dpll device
 * @dpll: registered dpll pointer
 * @ops: ops for a dpll device
 * @priv: pointer to private information of owner
 *
 * Unregister device, make it unavailable for userspace.
 * Note: It does not free the memory
 * Context: Acquires a lock (dpll_lock)
 */
void dpll_device_unregister(struct dpll_device *dpll,
                            const struct dpll_device_ops *ops, void *priv)
{
        struct dpll_device_registration *reg;

        mutex_lock(&dpll_lock);
        ASSERT_DPLL_REGISTERED(dpll);
        dpll_device_delete_ntf(dpll);
        reg = dpll_device_registration_find(dpll, ops, priv);
        if (WARN_ON(!reg)) {
                mutex_unlock(&dpll_lock);
                return;
        }
        list_del(&reg->list);
        kfree(reg);

        if (!list_empty(&dpll->registration_list)) {
                mutex_unlock(&dpll_lock);
                return;
        }
        xa_clear_mark(&dpll_device_xa, dpll->id, DPLL_REGISTERED);
        mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_device_unregister);
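
/*
 * Example (illustrative sketch): registering a device with driver ops.
 * my_dpll_device_ops, my_mode_get and my_lock_status_get are hypothetical
 * driver code; mode_get and lock_status_get are mandatory here.
 *
 *	static const struct dpll_device_ops my_dpll_device_ops = {
 *		.mode_get = my_mode_get,
 *		.lock_status_get = my_lock_status_get,
 *	};
 *
 *	err = dpll_device_register(dpll, DPLL_TYPE_EEC, &my_dpll_device_ops, priv);
 *	...
 *	dpll_device_unregister(dpll, &my_dpll_device_ops, priv);
 *	dpll_device_put(dpll);
 */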

static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
{
        kfree(prop->package_label);
        kfree(prop->panel_label);
        kfree(prop->board_label);
        kfree(prop->freq_supported);
}

static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
                             struct dpll_pin_properties *dst)
{
        if (WARN_ON(src->freq_supported && !src->freq_supported_num))
                return -EINVAL;

        memcpy(dst, src, sizeof(*dst));
        if (src->freq_supported) {
                size_t freq_size = src->freq_supported_num *
                                   sizeof(*src->freq_supported);
                dst->freq_supported = kmemdup(src->freq_supported,
                                              freq_size, GFP_KERNEL);
                if (!dst->freq_supported)
                        return -ENOMEM;
        }
        if (src->board_label) {
                dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
                if (!dst->board_label)
                        goto err_board_label;
        }
        if (src->panel_label) {
                dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
                if (!dst->panel_label)
                        goto err_panel_label;
        }
        if (src->package_label) {
                dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
                if (!dst->package_label)
                        goto err_package_label;
        }

        return 0;

err_package_label:
        kfree(dst->panel_label);
err_panel_label:
        kfree(dst->board_label);
err_board_label:
        kfree(dst->freq_supported);
        return -ENOMEM;
}

static struct dpll_pin *
dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
               const struct dpll_pin_properties *prop)
{
        struct dpll_pin *pin;
        int ret;

        pin = kzalloc(sizeof(*pin), GFP_KERNEL);
        if (!pin)
                return ERR_PTR(-ENOMEM);
        pin->pin_idx = pin_idx;
        pin->clock_id = clock_id;
        pin->module = module;
        if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
                    prop->type > DPLL_PIN_TYPE_MAX)) {
                ret = -EINVAL;
                goto err_pin_prop;
        }
        ret = dpll_pin_prop_dup(prop, &pin->prop);
        if (ret)
                goto err_pin_prop;
        refcount_set(&pin->refcount, 1);
        xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
        xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
        ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
                              &dpll_pin_xa_id, GFP_KERNEL);
        if (ret < 0)
                goto err_xa_alloc;

        return pin;

err_xa_alloc:
        xa_destroy(&pin->dpll_refs);
        xa_destroy(&pin->parent_refs);
        dpll_pin_prop_free(&pin->prop);
err_pin_prop:
        kfree(pin);
        return ERR_PTR(ret);
}

static void dpll_netdev_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
{
        rtnl_lock();
        rcu_assign_pointer(dev->dpll_pin, dpll_pin);
        rtnl_unlock();
}

void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
{
        WARN_ON(!dpll_pin);
        dpll_netdev_pin_assign(dev, dpll_pin);
}
EXPORT_SYMBOL(dpll_netdev_pin_set);

void dpll_netdev_pin_clear(struct net_device *dev)
{
        dpll_netdev_pin_assign(dev, NULL);
}
EXPORT_SYMBOL(dpll_netdev_pin_clear);
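
/*
 * Example (illustrative sketch): a network driver attaches an already
 * registered pin to its netdev so the pin can be resolved from the
 * interface, and detaches it before the pin is unregistered.
 *
 *	dpll_netdev_pin_set(netdev, pin);
 *	...
 *	dpll_netdev_pin_clear(netdev);
 */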

/**
 * dpll_pin_get - find existing or create new dpll pin
 * @clock_id: clock_id of creator
 * @pin_idx: idx given by dev driver
 * @module: reference to registering module
 * @prop: dpll pin properties
 *
 * Get existing object of a pin (unique for given arguments) or create a
 * new one if it doesn't exist yet.
 *
 * Context: Acquires a lock (dpll_lock)
 * Return:
 * * valid allocated dpll_pin struct pointer if succeeded
 * * ERR_PTR(X) - error
 */
struct dpll_pin *
dpll_pin_get(u64 clock_id, u32 pin_idx, struct module *module,
             const struct dpll_pin_properties *prop)
{
        struct dpll_pin *pos, *ret = NULL;
        unsigned long i;

        mutex_lock(&dpll_lock);
        xa_for_each(&dpll_pin_xa, i, pos) {
                if (pos->clock_id == clock_id &&
                    pos->pin_idx == pin_idx &&
                    pos->module == module) {
                        ret = pos;
                        refcount_inc(&ret->refcount);
                        break;
                }
        }
        if (!ret)
                ret = dpll_pin_alloc(clock_id, pin_idx, module, prop);
        mutex_unlock(&dpll_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(dpll_pin_get);

/**
 * dpll_pin_put - decrease the refcount and free memory if possible
 * @pin: pointer to a pin to be put
 *
 * Drop reference for a pin; if all references are gone, delete the pin object.
 *
 * Context: Acquires a lock (dpll_lock)
 */
void dpll_pin_put(struct dpll_pin *pin)
{
        mutex_lock(&dpll_lock);
        if (refcount_dec_and_test(&pin->refcount)) {
                xa_erase(&dpll_pin_xa, pin->id);
                xa_destroy(&pin->dpll_refs);
                xa_destroy(&pin->parent_refs);
                dpll_pin_prop_free(&pin->prop);
                kfree_rcu(pin, rcu);
        }
        mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_put);
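
/*
 * Example (illustrative sketch): obtaining a pin handle together with its
 * static properties. The property values and clock id are hypothetical
 * driver data.
 *
 *	static const struct dpll_pin_properties my_pin_prop = {
 *		.board_label = "SMA1",
 *		.type = DPLL_PIN_TYPE_EXT,
 *		.capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE,
 *	};
 *
 *	pin = dpll_pin_get(my_clock_id, 0, THIS_MODULE, &my_pin_prop);
 *	if (IS_ERR(pin))
 *		return PTR_ERR(pin);
 *	...
 *	dpll_pin_put(pin);
 */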

static int
__dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
                    const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
        int ret;

        ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
        if (ret)
                return ret;
        ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
        if (ret)
                goto ref_pin_del;
        xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
        dpll_pin_create_ntf(pin);

        return ret;

ref_pin_del:
        dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
        return ret;
}

/**
 * dpll_pin_register - register the dpll pin in the subsystem
 * @dpll: pointer to a dpll
 * @pin: pointer to a dpll pin
 * @ops: ops for a dpll pin
 * @priv: pointer to private information of owner
 *
 * Context: Acquires a lock (dpll_lock)
 * Return:
 * * 0 on success
 * * negative - error value
 */
int
dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
                  const struct dpll_pin_ops *ops, void *priv)
{
        int ret;

        if (WARN_ON(!ops) ||
            WARN_ON(!ops->state_on_dpll_get) ||
            WARN_ON(!ops->direction_get))
                return -EINVAL;

        mutex_lock(&dpll_lock);
        if (WARN_ON(!(dpll->module == pin->module &&
                      dpll->clock_id == pin->clock_id)))
                ret = -EINVAL;
        else
                ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
        mutex_unlock(&dpll_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(dpll_pin_register);

static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
                      const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
        ASSERT_DPLL_PIN_REGISTERED(pin);
        dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
        dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
        if (xa_empty(&pin->dpll_refs))
                xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
}

/**
 * dpll_pin_unregister - unregister dpll pin from dpll device
 * @dpll: registered dpll pointer
 * @pin: pointer to a pin
 * @ops: ops for a dpll pin
 * @priv: pointer to private information of owner
 *
 * Note: It does not free the memory
 * Context: Acquires a lock (dpll_lock)
 */
void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
                         const struct dpll_pin_ops *ops, void *priv)
{
        if (WARN_ON(xa_empty(&dpll->pin_refs)))
                return;
        if (WARN_ON(!xa_empty(&pin->parent_refs)))
                return;

        mutex_lock(&dpll_lock);
        dpll_pin_delete_ntf(pin);
        __dpll_pin_unregister(dpll, pin, ops, priv, NULL);
        mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_unregister);
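
/*
 * Example (illustrative sketch): registering a pin with a dpll device.
 * my_dpll_pin_ops and its callbacks are hypothetical driver code;
 * state_on_dpll_get and direction_get are mandatory here.
 *
 *	static const struct dpll_pin_ops my_dpll_pin_ops = {
 *		.state_on_dpll_get = my_pin_state_on_dpll_get,
 *		.direction_get = my_pin_direction_get,
 *	};
 *
 *	err = dpll_pin_register(dpll, pin, &my_dpll_pin_ops, priv);
 *	...
 *	dpll_pin_unregister(dpll, pin, &my_dpll_pin_ops, priv);
 *	dpll_pin_put(pin);
 */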

/**
 * dpll_pin_on_pin_register - register a pin with a parent pin
 * @parent: pointer to a parent pin
 * @pin: pointer to a pin
 * @ops: ops for a dpll pin
 * @priv: pointer to private information of owner
 *
 * Register a pin with a parent pin, create references between them and
 * between newly registered pin and dplls connected with a parent pin.
 *
 * Context: Acquires a lock (dpll_lock)
 * Return:
 * * 0 on success
 * * negative - error value
 */
int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
                             const struct dpll_pin_ops *ops, void *priv)
{
        struct dpll_pin_ref *ref;
        unsigned long i, stop;
        int ret;

        if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
                return -EINVAL;

        if (WARN_ON(!ops) ||
            WARN_ON(!ops->state_on_pin_get) ||
            WARN_ON(!ops->direction_get))
                return -EINVAL;

        mutex_lock(&dpll_lock);
        ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
        if (ret)
                goto unlock;
        refcount_inc(&pin->refcount);
        xa_for_each(&parent->dpll_refs, i, ref) {
                ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
                if (ret) {
                        stop = i;
                        goto dpll_unregister;
                }
                dpll_pin_create_ntf(pin);
        }
        mutex_unlock(&dpll_lock);

        return ret;

dpll_unregister:
        xa_for_each(&parent->dpll_refs, i, ref)
                if (i < stop) {
                        __dpll_pin_unregister(ref->dpll, pin, ops, priv,
                                              parent);
                        dpll_pin_delete_ntf(pin);
                }
        refcount_dec(&pin->refcount);
        dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
unlock:
        mutex_unlock(&dpll_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_register);

/**
 * dpll_pin_on_pin_unregister - unregister dpll pin from a parent pin
 * @parent: pointer to a parent pin
 * @pin: pointer to a pin
 * @ops: ops for a dpll pin
 * @priv: pointer to private information of owner
 *
 * Context: Acquires a lock (dpll_lock)
 * Note: It does not free the memory
 */
void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
                                const struct dpll_pin_ops *ops, void *priv)
{
        struct dpll_pin_ref *ref;
        unsigned long i;

        mutex_lock(&dpll_lock);
        dpll_pin_delete_ntf(pin);
        dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
        refcount_dec(&pin->refcount);
        xa_for_each(&pin->dpll_refs, i, ref)
                __dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
        mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
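
/*
 * Example (illustrative sketch): registering a pin under a MUX-type parent
 * pin instead of directly under a dpll device. The callbacks are
 * hypothetical; state_on_pin_get and direction_get are mandatory here.
 *
 *	static const struct dpll_pin_ops my_mux_child_pin_ops = {
 *		.state_on_pin_get = my_pin_state_on_pin_get,
 *		.direction_get = my_pin_direction_get,
 *	};
 *
 *	err = dpll_pin_on_pin_register(mux_pin, pin, &my_mux_child_pin_ops, priv);
 *	...
 *	dpll_pin_on_pin_unregister(mux_pin, pin, &my_mux_child_pin_ops, priv);
 */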

static struct dpll_device_registration *
dpll_device_registration_first(struct dpll_device *dpll)
{
        struct dpll_device_registration *reg;

        reg = list_first_entry_or_null((struct list_head *)&dpll->registration_list,
                                       struct dpll_device_registration, list);
        WARN_ON(!reg);
        return reg;
}

void *dpll_priv(struct dpll_device *dpll)
{
        struct dpll_device_registration *reg;

        reg = dpll_device_registration_first(dpll);
        return reg->priv;
}

const struct dpll_device_ops *dpll_device_ops(struct dpll_device *dpll)
{
        struct dpll_device_registration *reg;

        reg = dpll_device_registration_first(dpll);
        return reg->ops;
}

static struct dpll_pin_registration *
dpll_pin_registration_first(struct dpll_pin_ref *ref)
{
        struct dpll_pin_registration *reg;

        reg = list_first_entry_or_null(&ref->registration_list,
                                       struct dpll_pin_registration, list);
        WARN_ON(!reg);
        return reg;
}

void *dpll_pin_on_dpll_priv(struct dpll_device *dpll,
                            struct dpll_pin *pin)
{
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;

        ref = xa_load(&dpll->pin_refs, pin->pin_idx);
        if (!ref)
                return NULL;
        reg = dpll_pin_registration_first(ref);
        return reg->priv;
}

void *dpll_pin_on_pin_priv(struct dpll_pin *parent,
                           struct dpll_pin *pin)
{
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;

        ref = xa_load(&pin->parent_refs, parent->pin_idx);
        if (!ref)
                return NULL;
        reg = dpll_pin_registration_first(ref);
        return reg->priv;
}

const struct dpll_pin_ops *dpll_pin_ops(struct dpll_pin_ref *ref)
{
        struct dpll_pin_registration *reg;

        reg = dpll_pin_registration_first(ref);
        return reg->ops;
}

static int __init dpll_init(void)
{
        int ret;

        ret = genl_register_family(&dpll_nl_family);
        if (ret)
                goto error;

        return 0;

error:
        mutex_destroy(&dpll_lock);
        return ret;
}

static void __exit dpll_exit(void)
{
        genl_unregister_family(&dpll_nl_family);
        mutex_destroy(&dpll_lock);
}

subsys_initcall(dpll_init);
module_exit(dpll_exit);