inkern.c

// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	struct iio_map_internal *mapi;
	int i = 0;
	int ret;

	if (!maps)
		return 0;

	guard(mutex)(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}

	return 0;
error_ret:
	iio_map_array_unregister_locked(indio_dev);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
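
/*
 * Example (an illustrative sketch, not part of the original file): an ADC
 * driver can register a map array so that named consumers can later claim
 * channels via iio_channel_get(). The names below are hypothetical; the
 * array must end with an empty sentinel entry, because registration stops
 * at the first NULL consumer_dev_name.
 *
 *	static struct iio_map adc_maps[] = {
 *		IIO_MAP("channel_0", "charger", "vbat"),
 *		IIO_MAP("channel_1", "hwmon-ntc", "ntc"),
 *		{ }
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, adc_maps);
 */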

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	guard(mutex)(&iio_map_list_lock);
	return iio_map_array_unregister_locked(indio_dev);
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
				struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * channels in IIO chips. This function performs only one sanity check:
 * whether the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
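
/*
 * Illustrative devicetree fragment (an assumption, not taken from this file)
 * showing what the xlate above consumes: with "#io-channel-cells = <1>", the
 * single cell after the phandle becomes args[0], i.e. the channel index.
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 2>;
 *		io-channel-names = "vbat";
 *	};
 */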

static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		return ERR_PTR(err);

	return_ptr(channel);
}

static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;
	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case we found 'name' in 'io-channel-names',
			 * but the lookup still failed, so we should not fall
			 * through to any other lookup. Hence, explicitly
			 * return -EINVAL (maybe not the best error code) so
			 * that the caller won't do a system-wide lookup.
			 */
			return ERR_PTR(-EINVAL);
		}
		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case) which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returned -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}

struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has an "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
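
/*
 * Illustrative devicetree fragment (an assumption, not from this file): an
 * "io-channel-ranges" property on a parent node lets a child without its own
 * "io-channels" inherit the parent's mappings during the walk above.
 *
 *	parent {
 *		io-channels = <&adc 0>;
 *		io-channel-names = "vsense";
 *		io-channel-ranges;
 *
 *		child {
 *			...
 *		};
 *	};
 *
 * Here fwnode_iio_channel_get_by_name(child_fwnode, "vsense") resolves via
 * the parent.
 */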

static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return_ptr(chans);

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	scoped_guard(mutex, &iio_map_list_lock) {
		list_for_each_entry(c_i, &iio_map_list, l) {
			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
			    (channel_name &&
			     strcmp(channel_name, c_i->map->consumer_channel) != 0))
				continue;
			c = c_i;
			iio_device_get(c->indio_dev);
			break;
		}
	}
	if (!c)
		return ERR_PTR(-ENODEV);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_mem;
		}
	}

	return_ptr(channel);

error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
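
/*
 * Example consumer usage (an illustrative sketch, not part of the original
 * file; the channel name "vbat" is hypothetical). Note that failure may be
 * -EPROBE_DEFER if the producing driver has not probed yet.
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_processed(chan, &val);
 *	...
 *	iio_channel_release(chan);
 */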

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
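
/*
 * With the devm variant the release above is tied to the consumer device's
 * lifetime, so a probe() can simply do (a sketch; "vbat" is hypothetical):
 *
 *	chan = devm_iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */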

struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_map_internal *c = NULL;
	struct iio_channel *fw_chans;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	fw_chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
		return fw_chans;

	name = dev_name(dev);

	guard(mutex)(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	return_ptr(chans);

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
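
/*
 * Example (an illustrative sketch): the returned array is terminated by an
 * entry with a NULL indio_dev, so a consumer can walk it without knowing
 * the count.
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = iio_channel_get_all(dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		...use chan->channel...
 *	iio_channel_release_all(chans);
 */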

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_raw_multi) {
		ret = iio_info->read_raw_multi(chan->indio_dev,
					       chan->channel,
					       INDIO_MAX_RAW_ELEMENTS,
					       vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else if (iio_info->read_raw) {
		ret = iio_info->read_raw(chan->indio_dev,
					 chan->channel, val, val2, info);
	} else {
		return -EINVAL;
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 denominator, raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw64 * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
	case IIO_VAL_INT_PLUS_NANO:
		switch (scale_type) {
		case IIO_VAL_INT_PLUS_MICRO:
			denominator = MICRO;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			denominator = NANO;
			break;
		}
		*processed = raw64 * scale * abs(scale_val);
		*processed += div_s64(raw64 * scale * abs(scale_val2), denominator);
		if (scale_val < 0 || scale_val2 < 0)
			*processed *= -1;
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						     scale);
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
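
/*
 * Worked example with hypothetical numbers: for a channel whose scale reads
 * back as IIO_VAL_FRACTIONAL with scale_val = 1800 and scale_val2 = 1024
 * (i.e. 1800/1024 units per LSB) and no reported offset, a raw value of 512
 * and a consumer scale of 1 yield
 *
 *	processed = 512 * 1800 / 1024 = 900
 */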

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			return ret;
		*val *= scale;

		return ret;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			return ret;

		return iio_convert_raw_to_processed_unlocked(chan, *val, val,
							     scale);
	}
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
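
/*
 * Illustrative note: the consumer scale is a plain multiplier applied to the
 * processed value, so a consumer that needs the result scaled up by 1000
 * (for instance to shift the decimal point of a processed reading) might do:
 *
 *	ret = iio_read_channel_processed_scale(chan, &val, 1000);
 */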

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_avail)
		return iio_info->read_avail(chan->indio_dev, chan->channel,
					    vals, type, length, info);
	return -EINVAL;
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_avail(chan, vals, type, length, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
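
/*
 * Example (an illustrative sketch): the return value indicates how to
 * interpret vals, either IIO_AVAIL_LIST with "length" integers or
 * IIO_AVAIL_RANGE with a {min, step, max} triplet.
 *
 *	const int *vals;
 *	int length, ret, i;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &length);
 *	if (ret == IIO_AVAIL_LIST)
 *		for (i = 0; i < length; i++)
 *			pr_debug("raw value %d available\n", vals[i]);
 */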

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			if (val2)
				*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = max_array(vals, length);
			break;
		default:
			/* TODO: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

static int iio_channel_read_min(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[0];
			break;
		default:
			*val = vals[0];
			if (val2)
				*val2 = vals[1];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = min_array(vals, length);
			break;
		default:
			/* TODO: learn about min for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	*type = chan->channel->type;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (iio_info->write_raw)
		return iio_info->write_raw(chan->indio_dev,
					   chan->channel, val, val2, info);
	return -EINVAL;
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_write(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
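
/*
 * Example (an illustrative sketch): a consumer driving a DAC channel
 * obtained via iio_channel_get() can set a raw output code; the value 2048
 * is hypothetical and device-specific.
 *
 *	ret = iio_write_channel_raw(dac_chan, 2048);
 *	if (ret)
 *		dev_err(dev, "failed to write DAC: %d\n", ret);
 */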

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);

ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
{
	return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_label);