// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework to handle complex IIO aggregate devices.
 *
 * The typical architecture is to have one device as the frontend device which
 * can be "linked" against one or multiple backend devices. All the IIO and
 * userspace interfaces are expected to be registered/managed by the frontend
 * device, which will call back into the backends when needed (to get/set some
 * configuration that it does not directly control).
 *
 *                                           -------------------------------------------------------
 * ------------------                        | ------------         ------------      -------  FPGA|
 * |     ADC        |------------------------| | ADC CORE |---------| DMA CORE |------| RAM |      |
 * | (Frontend/IIO) | Serial Data (eg: LVDS) | |(backend) |---------|          |------|     |      |
 * |                |------------------------| ------------         ------------      -------      |
 * ------------------                        -------------------------------------------------------
 *
 * The framework interface is pretty simple:
 *   - Backends should register themselves with devm_iio_backend_register()
 *   - Frontend devices should get backends with devm_iio_backend_get()
 * (a short usage sketch follows this comment)
 *
 * Also note that the primary targets for this framework are converters like
 * ADCs/DACs, so iio_backend_ops will have some operations typical of converter
 * devices. On top of that, this is "generic" for all of IIO, which means any
 * kind of device can make use of the framework. That said, if the
 * iio_backend_ops struct begins to grow out of control, we can always refactor
 * things so that industrialio-backend.c is left with only the really generic
 * stuff. Then, we can build on top of it depending on the needs.
 *
 * Copyright (C) 2023-2024 Analog Devices Inc.
 */
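
/*
 * A minimal frontend-side usage sketch (illustration only; "my_adc" and the
 * platform_device probe shape are hypothetical, while the devm_* calls are the
 * framework API implemented below):
 *
 *	static int my_adc_probe(struct platform_device *pdev)
 *	{
 *		struct iio_backend *back;
 *		int ret;
 *
 *		back = devm_iio_backend_get(&pdev->dev, NULL);
 *		if (IS_ERR(back))
 *			return PTR_ERR(back);
 *
 *		ret = devm_iio_backend_enable(&pdev->dev, back);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */
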
#define dev_fmt(fmt) "iio-backend: " fmt

#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/types.h>

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

struct iio_backend {
        struct list_head entry;
        const struct iio_backend_ops *ops;
        struct device *frontend_dev;
        struct device *dev;
        struct module *owner;
        void *priv;
        const char *name;
        unsigned int cached_reg_addr;
        /*
         * This index is relative to the frontend, i.e., for frontends with
         * multiple backends it is the index of this particular backend. Used
         * for the debugfs directory name.
         */
        u8 idx;
};

/*
 * Helper struct for requesting buffers. This ensures that we have all data
 * that we need to free the buffer in a device managed action.
 */
struct iio_backend_buffer_pair {
        struct iio_backend *back;
        struct iio_buffer *buffer;
};

static LIST_HEAD(iio_back_list);
static DEFINE_MUTEX(iio_back_lock);

/*
 * Helper macros to call backend ops. Make sure the op is supported.
 */
#define iio_backend_check_op(back, op) ({ \
        struct iio_backend *____back = back; \
        int ____ret = 0; \
        \
        if (!____back->ops->op) \
                ____ret = -EOPNOTSUPP; \
        \
        ____ret; \
})

#define iio_backend_op_call(back, op, args...) ({ \
        struct iio_backend *__back = back; \
        int __ret; \
        \
        __ret = iio_backend_check_op(__back, op); \
        if (!__ret) \
                __ret = __back->ops->op(__back, ##args); \
        \
        __ret; \
})

#define iio_backend_ptr_op_call(back, op, args...) ({ \
        struct iio_backend *__back = back; \
        void *ptr_err; \
        int __ret; \
        \
        __ret = iio_backend_check_op(__back, op); \
        if (__ret) \
                ptr_err = ERR_PTR(__ret); \
        else \
                ptr_err = __back->ops->op(__back, ##args); \
        \
        ptr_err; \
})

#define iio_backend_void_op_call(back, op, args...) { \
        struct iio_backend *__back = back; \
        int __ret; \
        \
        __ret = iio_backend_check_op(__back, op); \
        if (!__ret) \
                __back->ops->op(__back, ##args); \
        else \
                dev_dbg(__back->dev, "Op(%s) not implemented\n", \
                        __stringify(op)); \
}

static ssize_t iio_backend_debugfs_read_reg(struct file *file,
                                            char __user *userbuf,
                                            size_t count, loff_t *ppos)
{
        struct iio_backend *back = file->private_data;
        char read_buf[20];
        unsigned int val;
        int ret, len;

        ret = iio_backend_op_call(back, debugfs_reg_access,
                                  back->cached_reg_addr, 0, &val);
        if (ret)
                return ret;

        len = scnprintf(read_buf, sizeof(read_buf), "0x%X\n", val);

        return simple_read_from_buffer(userbuf, count, ppos, read_buf, len);
}

static ssize_t iio_backend_debugfs_write_reg(struct file *file,
                                             const char __user *userbuf,
                                             size_t count, loff_t *ppos)
{
        struct iio_backend *back = file->private_data;
        unsigned int val;
        char buf[80];
        ssize_t rc;
        int ret;

        rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
        if (rc < 0)
                return rc;

        /* make sure the buffer is NUL terminated before parsing it */
        buf[rc] = '\0';

        ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);

        switch (ret) {
        case 1:
                return count;
        case 2:
                ret = iio_backend_op_call(back, debugfs_reg_access,
                                          back->cached_reg_addr, val, NULL);
                if (ret)
                        return ret;
                return count;
        default:
                return -EINVAL;
        }
}
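
/*
 * Expected usage of the direct_reg_access file created below. The debugfs
 * path shown is an assumption based on the directory names used in this file;
 * sscanf("%i %i") accepts decimal, octal and 0x-prefixed hex:
 *
 *	# select register 0x40 and read it back
 *	echo 0x40 > /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 *	cat /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 *	# write 0x3 to register 0x40
 *	echo "0x40 0x3" > /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 */
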
static const struct file_operations iio_backend_debugfs_reg_fops = {
        .open = simple_open,
        .read = iio_backend_debugfs_read_reg,
        .write = iio_backend_debugfs_write_reg,
};

static ssize_t iio_backend_debugfs_read_name(struct file *file,
                                             char __user *userbuf,
                                             size_t count, loff_t *ppos)
{
        struct iio_backend *back = file->private_data;
        char name[128];
        int len;

        len = scnprintf(name, sizeof(name), "%s\n", back->name);

        return simple_read_from_buffer(userbuf, count, ppos, name, len);
}

static const struct file_operations iio_backend_debugfs_name_fops = {
        .open = simple_open,
        .read = iio_backend_debugfs_read_name,
};

/**
 * iio_backend_debugfs_add - Add debugfs interfaces for backends
 * @back: Backend device
 * @indio_dev: IIO device
 */
void iio_backend_debugfs_add(struct iio_backend *back,
                             struct iio_dev *indio_dev)
{
        struct dentry *d = iio_get_debugfs_dentry(indio_dev);
        struct dentry *back_d;
        char name[128];

        if (!IS_ENABLED(CONFIG_DEBUG_FS) || !d)
                return;
        if (!back->ops->debugfs_reg_access && !back->name)
                return;

        snprintf(name, sizeof(name), "backend%d", back->idx);

        back_d = debugfs_create_dir(name, d);
        if (IS_ERR(back_d))
                return;

        if (back->ops->debugfs_reg_access)
                debugfs_create_file("direct_reg_access", 0600, back_d, back,
                                    &iio_backend_debugfs_reg_fops);

        if (back->name)
                debugfs_create_file("name", 0400, back_d, back,
                                    &iio_backend_debugfs_name_fops);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_add, IIO_BACKEND);

/**
 * iio_backend_debugfs_print_chan_status - Print channel status
 * @back: Backend device
 * @chan: Channel number
 * @buf: Buffer where to print the status
 * @len: Available space
 *
 * One use case where this is useful is when running test tones on a digital
 * interface and asking the backend to dump more details on why a test tone
 * might have errors.
 *
 * RETURNS:
 * Number of copied bytes on success, negative error code on failure.
 */
ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
                                              unsigned int chan, char *buf,
                                              size_t len)
{
        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return -ENODEV;

        return iio_backend_op_call(back, debugfs_print_chan_status, chan, buf,
                                   len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_print_chan_status, IIO_BACKEND);

/**
 * iio_backend_chan_enable - Enable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
{
        return iio_backend_op_call(back, chan_enable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, IIO_BACKEND);

/**
 * iio_backend_chan_disable - Disable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
{
        return iio_backend_op_call(back, chan_disable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, IIO_BACKEND);

static void __iio_backend_disable(void *back)
{
        iio_backend_void_op_call(back, disable);
}

/**
 * iio_backend_disable - Backend disable
 * @back: Backend device
 */
void iio_backend_disable(struct iio_backend *back)
{
        __iio_backend_disable(back);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_disable, IIO_BACKEND);

/**
 * iio_backend_enable - Backend enable
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_enable(struct iio_backend *back)
{
        return iio_backend_op_call(back, enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_enable, IIO_BACKEND);

/**
 * devm_iio_backend_enable - Device managed backend enable
 * @dev: Consumer device for the backend
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
        int ret;

        ret = iio_backend_enable(back);
        if (ret)
                return ret;

        return devm_add_action_or_reset(dev, __iio_backend_disable, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, IIO_BACKEND);

/**
 * iio_backend_data_format_set - Configure the channel data format
 * @back: Backend device
 * @chan: Channel number
 * @data: Data format
 *
 * Properly configure a channel with respect to the expected data format. A
 * &struct iio_backend_data_fmt must be passed with the settings.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
                                const struct iio_backend_data_fmt *data)
{
        if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, data_format_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND);

/**
 * iio_backend_data_source_set - Select data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Data source
 *
 * A given backend may have different sources to stream/sync data. This allows
 * the data source to be selected.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
                                enum iio_backend_data_source data)
{
        if (data >= IIO_BACKEND_DATA_SOURCE_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, data_source_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, IIO_BACKEND);

/**
 * iio_backend_set_sampling_freq - Set channel sampling rate
 * @back: Backend device
 * @chan: Channel number
 * @sample_rate_hz: Sample rate in Hz
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
                                  u64 sample_rate_hz)
{
        return iio_backend_op_call(back, set_sample_rate, chan, sample_rate_hz);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_set_sampling_freq, IIO_BACKEND);

/**
 * iio_backend_test_pattern_set - Configure a test pattern
 * @back: Backend device
 * @chan: Channel number
 * @pattern: Test pattern
 *
 * Configure a test pattern on the backend. This is typically used for
 * calibrating the timings on the data digital interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_test_pattern_set(struct iio_backend *back,
                                 unsigned int chan,
                                 enum iio_backend_test_pattern pattern)
{
        if (pattern >= IIO_BACKEND_TEST_PATTERN_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, test_pattern_set, chan, pattern);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_test_pattern_set, IIO_BACKEND);

/**
 * iio_backend_chan_status - Get the channel status
 * @back: Backend device
 * @chan: Channel number
 * @error: Error indication
 *
 * Get the current state of the backend channel. Typically used to check if
 * there were any errors sending/receiving data.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_status(struct iio_backend *back, unsigned int chan,
                            bool *error)
{
        return iio_backend_op_call(back, chan_status, chan, error);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_status, IIO_BACKEND);

/**
 * iio_backend_iodelay_set - Set digital I/O delay
 * @back: Backend device
 * @lane: Lane number
 * @taps: Number of taps
 *
 * Controls delays on sending/receiving data. One use case for this is to
 * calibrate the data digital interface so we get the best results when
 * transferring data. Note that @taps has no unit since the actual delay per
 * tap is very backend specific. Hence, frontend devices should typically loop
 * over an array of @taps (whose size should typically match the number of
 * calibration points on the frontend device) and call this API at each point
 * (a short sketch follows this function).
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane,
                            unsigned int taps)
{
        return iio_backend_op_call(back, iodelay_set, lane, taps);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_iodelay_set, IIO_BACKEND);
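
/*
 * A hedged sketch of the calibration loop described above (illustrative only;
 * my_adc_test_tap(), results[] and MY_ADC_NUM_TAPS are hypothetical frontend
 * helpers, not part of this framework):
 *
 *	for (tap = 0; tap < MY_ADC_NUM_TAPS; tap++) {
 *		ret = iio_backend_iodelay_set(back, lane, tap);
 *		if (ret)
 *			return ret;
 *		results[tap] = my_adc_test_tap(st, lane);
 *	}
 *	// then pick a tap at the center of the largest window of good results
 */
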
/**
 * iio_backend_data_sample_trigger - Control when to sample data
 * @back: Backend device
 * @trigger: Data trigger
 *
 * Mostly useful for input backends. Configures the backend for when to sample
 * data (e.g. rising vs falling edge).
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_sample_trigger(struct iio_backend *back,
                                    enum iio_backend_sample_trigger trigger)
{
        if (trigger >= IIO_BACKEND_SAMPLE_TRIGGER_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, data_sample_trigger, trigger);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_sample_trigger, IIO_BACKEND);

static void iio_backend_free_buffer(void *arg)
{
        struct iio_backend_buffer_pair *pair = arg;

        iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
}

/**
 * devm_iio_backend_request_buffer - Device managed buffer request
 * @dev: Consumer device for the backend
 * @back: Backend device
 * @indio_dev: IIO device
 *
 * Request an IIO buffer from the backend. The type of the buffer (typically
 * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
 * normally, the backend dictates what kind of buffering we can get.
 *
 * The backend .free_buffer() hook is automatically called on @dev detach.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_request_buffer(struct device *dev,
                                    struct iio_backend *back,
                                    struct iio_dev *indio_dev)
{
        struct iio_backend_buffer_pair *pair;
        struct iio_buffer *buffer;

        pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
        if (!pair)
                return -ENOMEM;

        buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        /* weak reference should be all we need */
        pair->back = back;
        pair->buffer = buffer;

        return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);
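
/*
 * A short usage sketch (assumption: called from a frontend probe, after
 * indio_dev is fully set up but before it is registered):
 *
 *	ret = devm_iio_backend_request_buffer(dev, back, indio_dev);
 *	if (ret)
 *		return ret;
 *
 *	return devm_iio_device_register(dev, indio_dev);
 */
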
/**
 * iio_backend_read_raw - Read a channel attribute from a backend device
 * @back: Backend device
 * @chan: IIO channel reference
 * @val: First returned value
 * @val2: Second returned value
 * @mask: Specify the attribute to return
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_read_raw(struct iio_backend *back,
                         struct iio_chan_spec const *chan, int *val, int *val2,
                         long mask)
{
        return iio_backend_op_call(back, read_raw, chan, val, val2, mask);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_read_raw, IIO_BACKEND);

static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev)
{
        struct iio_backend *back = ERR_PTR(-ENODEV), *iter;

        /*
         * We deliberately go through all backends even after finding a match.
         * The reason is that we want to catch frontend devices which have
         * more than one backend, in which case returning the first we find is
         * bogus. For those cases, frontends need to explicitly define
         * get_iio_backend() in struct iio_info.
         */
        guard(mutex)(&iio_back_lock);
        list_for_each_entry(iter, &iio_back_list, entry) {
                if (dev == iter->frontend_dev) {
                        if (!IS_ERR(back)) {
                                dev_warn(dev,
                                         "Multiple backends! get_iio_backend() needs to be implemented");
                                return ERR_PTR(-ENODEV);
                        }
                        back = iter;
                }
        }

        return back;
}

/**
 * iio_backend_ext_info_get - IIO ext_info read callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer where to place the attribute data
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the getter.
 *
 * RETURNS:
 * Number of bytes written to buf, negative error number on failure.
 */
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
                                 const struct iio_chan_spec *chan, char *buf)
{
        struct iio_backend *back;

        /*
         * The below should work for the majority of the cases. It will not
         * work when one frontend has multiple backends, in which case we'll
         * need a new callback in struct iio_info so we can directly request
         * the proper backend from the frontend. Anyway, let's only introduce
         * new options when really needed...
         */
        back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
        if (IS_ERR(back))
                return PTR_ERR(back);

        return iio_backend_op_call(back, ext_info_get, private, chan, buf);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_get, IIO_BACKEND);

/**
 * iio_backend_ext_info_set - IIO ext_info write callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer holding the sysfs attribute
 * @len: Buffer length
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the setter.
 *
 * RETURNS:
 * Buffer length on success, negative error number on failure.
 */
ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
                                 const struct iio_chan_spec *chan,
                                 const char *buf, size_t len)
{
        struct iio_backend *back;

        back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
        if (IS_ERR(back))
                return PTR_ERR(back);

        return iio_backend_op_call(back, ext_info_set, private, chan, buf, len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND);

/**
 * iio_backend_extend_chan_spec - Extend an IIO channel
 * @back: Backend device
 * @chan: IIO channel
 *
 * Some backends may have their own functionality and hence be capable of
 * extending a frontend's channel.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_extend_chan_spec(struct iio_backend *back,
                                 struct iio_chan_spec *chan)
{
        const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info;
        const struct iio_chan_spec_ext_info *back_ext_info;
        int ret;

        ret = iio_backend_op_call(back, extend_chan_spec, chan);
        if (ret)
                return ret;

        /*
         * Let's keep things simple for now. Don't allow the frontend's
         * extended info to be overwritten. If ever needed, we can support
         * appending to it.
         */
        if (frontend_ext_info && chan->ext_info != frontend_ext_info)
                return -EOPNOTSUPP;
        if (!chan->ext_info)
                return 0;

        /* Don't allow backends to get creative and force their own handlers */
        for (back_ext_info = chan->ext_info; back_ext_info->name; back_ext_info++) {
                if (back_ext_info->read != iio_backend_ext_info_get)
                        return -EINVAL;
                if (back_ext_info->write != iio_backend_ext_info_set)
                        return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_extend_chan_spec, IIO_BACKEND);
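
/*
 * A sketch of what a backend's .extend_chan_spec() op may look like. As
 * enforced above, the ext_info table must route through
 * iio_backend_ext_info_get()/iio_backend_ext_info_set()
 * ("my_backend_ext_info" and MY_BACKEND_CALIBSCALE are hypothetical):
 *
 *	static const struct iio_chan_spec_ext_info my_backend_ext_info[] = {
 *		{
 *			.name = "calibscale",
 *			.shared = IIO_SEPARATE,
 *			.read = iio_backend_ext_info_get,
 *			.write = iio_backend_ext_info_set,
 *			.private = MY_BACKEND_CALIBSCALE,
 *		},
 *		{ }
 *	};
 *
 *	static int my_backend_extend_chan_spec(struct iio_backend *back,
 *					       struct iio_chan_spec *chan)
 *	{
 *		chan->ext_info = my_backend_ext_info;
 *		return 0;
 *	}
 */
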
static void iio_backend_release(void *arg)
{
        struct iio_backend *back = arg;

        module_put(back->owner);
}

static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
{
        struct device_link *link;
        int ret;

        /*
         * Make sure the provider cannot be unloaded before the consumer
         * module. Note that device_links would still guarantee that nothing
         * is accessible (and breaks) but this makes it explicit that the
         * consumer module must also be unloaded.
         */
        if (!try_module_get(back->owner))
                return dev_err_probe(dev, -ENODEV,
                                     "Cannot get module reference\n");

        ret = devm_add_action_or_reset(dev, iio_backend_release, back);
        if (ret)
                return ret;

        link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
                return dev_err_probe(dev, -EINVAL,
                                     "Could not link to supplier(%s)\n",
                                     dev_name(back->dev));

        back->frontend_dev = dev;

        dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));

        return 0;
}

static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, const char *name,
                                                         struct fwnode_handle *fwnode)
{
        struct fwnode_handle *fwnode_back;
        struct iio_backend *back;
        unsigned int index;
        int ret;

        if (name) {
                ret = device_property_match_string(dev, "io-backend-names",
                                                   name);
                if (ret < 0)
                        return ERR_PTR(ret);
                index = ret;
        } else {
                index = 0;
        }

        fwnode_back = fwnode_find_reference(fwnode, "io-backends", index);
        if (IS_ERR(fwnode_back))
                return dev_err_cast_probe(dev, fwnode_back,
                                          "Cannot get Firmware reference\n");

        guard(mutex)(&iio_back_lock);
        list_for_each_entry(back, &iio_back_list, entry) {
                if (!device_match_fwnode(back->dev, fwnode_back))
                        continue;

                fwnode_handle_put(fwnode_back);
                ret = __devm_iio_backend_get(dev, back);
                if (ret)
                        return ERR_PTR(ret);

                if (name)
                        back->idx = index;

                return back;
        }

        fwnode_handle_put(fwnode_back);
        return ERR_PTR(-EPROBE_DEFER);
}

/**
 * devm_iio_backend_get - Device managed backend device get
 * @dev: Consumer device for the backend
 * @name: Backend name
 *
 * Gets the backend associated with @dev.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
{
        return __devm_iio_backend_fwnode_get(dev, name, dev_fwnode(dev));
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);
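
/*
 * The lookup above relies on "io-backends" (and, for named lookups,
 * "io-backend-names") properties in the consumer firmware node. A devicetree
 * sketch (node and label names are hypothetical):
 *
 *	adc@0 {
 *		compatible = "adi,my-adc";
 *		io-backends = <&axi_adc_core>;
 *		io-backend-names = "adc-core";
 *	};
 */
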
/**
 * devm_iio_backend_fwnode_get - Device managed backend firmware node get
 * @dev: Consumer device for the backend
 * @name: Backend name
 * @fwnode: Firmware node of the backend consumer
 *
 * Gets the backend associated with a firmware node.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
                                                const char *name,
                                                struct fwnode_handle *fwnode)
{
        return __devm_iio_backend_fwnode_get(dev, name, fwnode);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_fwnode_get, IIO_BACKEND);

/**
 * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
 * @dev: Consumer device for the backend
 * @fwnode: Firmware node of the backend device
 *
 * Search the backend list for a device matching @fwnode.
 * This API should not be used; it is only present to prevent the first user
 * of this framework from breaking its DT ABI.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
                                          struct fwnode_handle *fwnode)
{
        struct iio_backend *back;
        int ret;

        guard(mutex)(&iio_back_lock);
        list_for_each_entry(back, &iio_back_list, entry) {
                if (!device_match_fwnode(back->dev, fwnode))
                        continue;

                ret = __devm_iio_backend_get(dev, back);
                if (ret)
                        return ERR_PTR(ret);

                return back;
        }

        return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, IIO_BACKEND);

/**
 * iio_backend_get_priv - Get driver private data
 * @back: Backend device
 */
void *iio_backend_get_priv(const struct iio_backend *back)
{
        return back->priv;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, IIO_BACKEND);

static void iio_backend_unregister(void *arg)
{
        struct iio_backend *back = arg;

        guard(mutex)(&iio_back_lock);
        list_del(&back->entry);
}

/**
 * devm_iio_backend_register - Device managed backend device register
 * @dev: Backend device being registered
 * @info: Backend info
 * @priv: Device private data
 *
 * @info is mandatory. Not providing it results in -EINVAL.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_register(struct device *dev,
                              const struct iio_backend_info *info, void *priv)
{
        struct iio_backend *back;

        if (!info || !info->ops)
                return dev_err_probe(dev, -EINVAL, "No backend ops given\n");

        /*
         * Through device_links, we guarantee that a frontend device cannot be
         * bound/exist if the backend driver is not around. Hence, we can bind
         * the backend object lifetime with the device being passed since
         * removing it will tear the frontend/consumer down.
         */
        back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
        if (!back)
                return -ENOMEM;

        back->ops = info->ops;
        back->name = info->name;
        back->owner = dev->driver->owner;
        back->dev = dev;
        back->priv = priv;
        scoped_guard(mutex, &iio_back_lock)
                list_add(&back->entry, &iio_back_list);

        return devm_add_action_or_reset(dev, iio_backend_unregister, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, IIO_BACKEND);
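
/*
 * A minimal backend-side registration sketch ("my_backend_*" names are
 * hypothetical; the ops shown are a subset of struct iio_backend_ops):
 *
 *	static const struct iio_backend_ops my_backend_ops = {
 *		.chan_enable = my_backend_chan_enable,
 *		.chan_disable = my_backend_chan_disable,
 *	};
 *
 *	static const struct iio_backend_info my_backend_info = {
 *		.name = "my-backend",
 *		.ops = &my_backend_ops,
 *	};
 *
 *	static int my_backend_probe(struct platform_device *pdev)
 *	{
 *		struct my_backend_state *st = devm_kzalloc(&pdev->dev,
 *							   sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return -ENOMEM;
 *
 *		return devm_iio_backend_register(&pdev->dev, &my_backend_info, st);
 *	}
 */
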
MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
MODULE_LICENSE("GPL");