// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	struct iio_map_internal *mapi;
	int i = 0;
	int ret;

	if (!maps)
		return 0;

	guard(mutex)(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}

	return 0;
error_ret:
	iio_map_array_unregister_locked(indio_dev);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
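/*
 * Illustrative sketch (not part of this file, hypothetical names): a board
 * file or provider driver might describe consumers with an iio_map array and
 * register it against its IIO device. Note the array must be terminated by
 * an empty entry, since iio_map_array_register() stops at the first element
 * with a NULL consumer_dev_name:
 *
 *	static struct iio_map adc_maps[] = {
 *		{
 *			.consumer_dev_name = "example-charger",
 *			.consumer_channel = "battery_voltage",
 *			.adc_channel_label = "VBAT",
 *		},
 *		{ }
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, adc_maps);
 */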
/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	guard(mutex)(&iio_map_list_lock);
	return iio_map_array_unregister_locked(indio_dev);
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
				struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb,
					indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}
/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the common case of
 * 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * whether the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
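/*
 * Illustrative devicetree sketch (hypothetical nodes): with the simple
 * translation above, the single cell in "io-channels" is taken directly as
 * the channel index, so the consumer below resolves to channels[2] of the
 * ADC's iio_dev:
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 2>;
 *	};
 */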
static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}
static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		return ERR_PTR(err);

	return_ptr(channel);
}
static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but somehow we still failed, so we should not
			 * proceed with any other lookup. Hence, explicitly
			 * return -EINVAL (maybe not the best error code) so
			 * that the caller won't do a system lookup.
			 */
			return ERR_PTR(-EINVAL);
		}
		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case) which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, we should only continue the lookup if
		 * fwnode_property_get_reference_args() returned -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}
struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has an "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
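/*
 * Illustrative devicetree sketch (hypothetical nodes): the parent walk above
 * lets a child node without its own "io-channels" inherit channels from a
 * parent, provided the parent sets "io-channel-ranges". Here a lookup of
 * "vsense" on the child finds the parent's mapping:
 *
 *	parent {
 *		io-channels = <&adc 0>;
 *		io-channel-names = "vsense";
 *		io-channel-ranges;
 *
 *		child {
 *			...
 *		};
 *	};
 */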
static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return_ptr(chans);

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	scoped_guard(mutex, &iio_map_list_lock) {
		list_for_each_entry(c_i, &iio_map_list, l) {
			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
			    (channel_name &&
			     strcmp(channel_name, c_i->map->consumer_channel) != 0))
				continue;
			c = c_i;
			iio_device_get(c->indio_dev);
			break;
		}
	}
	if (!c)
		return ERR_PTR(-ENODEV);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_mem;
		}
	}

	return_ptr(channel);

error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
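/*
 * Illustrative consumer-side sketch (hypothetical driver code, error
 * handling abbreviated): get a named channel in probe, read it, and release
 * it when done. With devm_iio_channel_get() below, the release step is
 * handled automatically:
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = iio_channel_get(dev, "battery_voltage");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_raw(chan, &val);
 *	...
 *	iio_channel_release(chan);
 */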
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_map_internal *c = NULL;
	struct iio_channel *fw_chans;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	fw_chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
		return fw_chans;

	name = dev_name(dev);

	guard(mutex)(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	return_ptr(chans);

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}
struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_raw_multi) {
		ret = iio_info->read_raw_multi(chan->indio_dev,
					       chan->channel,
					       INDIO_MAX_RAW_ELEMENTS,
					       vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else if (iio_info->read_raw) {
		ret = iio_info->read_raw(chan->indio_dev,
					 chan->channel, val, val2, info);
	} else {
		return -EINVAL;
	}

	return ret;
}
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
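/*
 * Worked example of the IIO_VAL_FRACTIONAL case above (made-up numbers):
 * with raw = 800, no offset, scale_val = 1250, scale_val2 = 1024 and a
 * consumer scale of 1, the result is
 *
 *	processed = div_s64(800 * 1250 * 1, 1024) = 976
 *
 * i.e. an ADC with a 1250/1024 mV-per-LSB scale turns a raw reading of 800
 * into roughly 976 mV (div_s64() truncates toward zero).
 */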
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						     scale);
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			return ret;
		*val *= scale;

		return ret;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			return ret;

		return iio_convert_raw_to_processed_unlocked(chan, *val, val,
							     scale);
	}
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_avail)
		return iio_info->read_avail(chan->indio_dev, chan->channel,
					    vals, type, length, info);
	return -EINVAL;
}
int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_avail(chan, vals, type, length, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
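/*
 * Illustrative consumer sketch (hypothetical code): on success the call
 * above returns the availability type, so a consumer expecting a discrete
 * list of raw values might iterate it like this:
 *
 *	const int *raw_vals;
 *	int i, n, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &raw_vals, &n);
 *	if (ret == IIO_AVAIL_LIST)
 *		for (i = 0; i < n; i++)
 *			use_raw_value(raw_vals[i]);
 */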
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

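	/*
	 * For IIO_AVAIL_RANGE the driver reports a {min, step, max} triplet:
	 * with IIO_VAL_INT each entry is a single integer, so the maximum is
	 * vals[2]; for the other value types each entry is an {integer,
	 * fractional} pair, so the maximum is vals[4] and vals[5].
	 */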
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			if (val2)
				*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = max_array(vals, length);
			break;
		default:
			/* TODO: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
static int iio_channel_read_min(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[0];
			break;
		default:
			*val = vals[0];
			if (val2)
				*val2 = vals[1];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = min_array(vals, length);
			break;
		default:
			/* TODO: learn about min for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}
int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	*type = chan->channel->type;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (iio_info->write_raw)
		return iio_info->write_raw(chan->indio_dev,
					   chan->channel, val, val2, info);
	return -EINVAL;
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_write(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}
ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
{
	return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_label);