// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger) simple by
 * starting trigger capture when the first sensor is added.
 *
 * Complex simultaneous start requires use of 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * name_show() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sysfs_emit(buf, "%s\n", trig->name);
}

static DEVICE_ATTR_RO(name);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	scoped_guard(mutex, &iio_trigger_list_lock) {
		if (__iio_trigger_find_by_name(trig_info->name)) {
			pr_err("Duplicate trigger name '%s'\n", trig_info->name);
			ret = -EEXIST;
			goto error_device_del;
		}
		list_add_tail(&trig_info->list, &iio_trigger_list);
	}

	return 0;

error_device_del:
	device_del(&trig_info->dev);
error_unregister_id:
	ida_free(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
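
/*
 * Illustrative sketch (not part of this file): a provider driver typically
 * allocates a trigger, fills in its ops, and then registers it. The "st"
 * state struct and "my_trigger_ops" below are hypothetical names.
 *
 *	st->trig = iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
 *				     iio_device_id(indio_dev));
 *	if (!st->trig)
 *		return -ENOMEM;
 *	st->trig->ops = &my_trigger_ops;
 *	iio_trigger_set_drvdata(st->trig, indio_dev);
 *	ret = iio_trigger_register(st->trig);
 *	if (ret)
 *		iio_trigger_free(st->trig);
 */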

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	scoped_guard(mutex, &iio_trigger_list_lock)
		list_del(&trig_info->list);

	ida_free(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (!indio_dev || !trig)
		return -EINVAL;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	guard(mutex)(&iio_dev_opaque->mlock);
	WARN_ON(iio_dev_opaque->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	iio_dev_opaque->trig_readonly = true;

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);
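
/*
 * Illustrative sketch (hypothetical probe code): a device whose sampling is
 * hard-wired to its own trigger can pin that trigger so user space cannot
 * change current_trigger afterwards.
 *
 *	ret = devm_iio_trigger_register(dev, st->trig);
 *	if (ret)
 *		return ret;
 *	ret = iio_trigger_set_immutable(indio_dev, st->trig);
 *	if (ret)
 *		return ret;
 */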

/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *iter;

	guard(mutex)(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name))
			return iio_trigger_get(iter);

	return NULL;
}

static void iio_reenable_work_fn(struct work_struct *work)
{
	struct iio_trigger *trig = container_of(work, struct iio_trigger,
						reenable_work);

	/*
	 * This 'might' occur after the trigger state is set to disabled -
	 * in that case the driver should skip reenabling.
	 */
	trig->ops->reenable(trig);
}

/*
 * In general, reenable callbacks may need to sleep and this path is
 * not performance sensitive, so just queue up a work item
 * to reenable the trigger for us.
 *
 * Races that can cause this:
 * 1) A handler occurs entirely in interrupt context, so the final
 *    decrement of the use counter happens in that interrupt.
 * 2) The trigger has been removed, but one last interrupt gets through.
 *
 * For (1) we must call reenable, but not in atomic context.
 * For (2) it should be safe to call reenable, if drivers never blindly
 * reenable after state is off.
 */
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		schedule_work(&trig->reenable_work);
}

/**
 * iio_trigger_poll() - Call the IRQ trigger handler of the consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a hard IRQ context.
 */
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done_atomic(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);

/**
 * iio_trigger_poll_nested() - Call the threaded trigger handler of the
 * consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a kernel thread context.
 */
void iio_trigger_poll_nested(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_nested);
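
/*
 * Illustrative sketch (hypothetical handler): a threaded interrupt handler,
 * e.g. for a trigger signal arriving over a slow bus, uses the nested
 * variant instead of iio_trigger_poll(), mirroring
 * iio_trigger_generic_data_rdy_poll() above.
 *
 *	static irqreturn_t my_trig_thread(int irq, void *private)
 *	{
 *		iio_trigger_poll_nested(private);
 *		return IRQ_HANDLED;
 *	}
 */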

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	scoped_guard(mutex, &trig->pool_lock) {
		ret = bitmap_find_free_region(trig->pool,
					      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					      ilog2(1));
		if (ret < 0)
			return ret;
	}

	return ret + trig->subirq_base;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	guard(mutex)(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
}

/* Complexity in here. With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled. Alternative of not enabling trigger unless
 * the relevant function is in there may be the best option.
 */
/* Worth protecting against double additions? */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	int ret = 0;

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(iio_dev_opaque->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
		       trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		/* Propagate the failure rather than falling through with ret == 0 */
		ret = pf->irq;
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (!iio_validate_own_trigger(pf->indio_dev, trig))
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(iio_dev_opaque->driver_module);
	return ret;
}

int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool no_other_users =
		bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
	int ret = 0;

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(iio_dev_opaque->driver_module);
	pf->irq = 0;

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return NULL;

	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
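
/*
 * Illustrative sketch, mirroring what iio_triggered_buffer_setup() does
 * internally ("my_trigger_handler" is a hypothetical bottom half): the top
 * half stamps the time, the bottom half reads and pushes the data.
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(iio_pollfunc_store_time,
 *						 my_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 iio_device_id(indio_dev));
 *	if (!indio_dev->pollfunc)
 *		return -ENOMEM;
 */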

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * current_trigger_show() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that
 *		is being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *	   on success or 0 if no trigger is available
 */
static ssize_t current_trigger_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * current_trigger_store() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *	   on success
 */
static ssize_t current_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	scoped_guard(mutex, &iio_dev_opaque->mlock) {
		if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED)
			return -EBUSY;
		if (iio_dev_opaque->trig_readonly)
			return -EPERM;
	}

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR_RW(current_trigger);
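
/*
 * Illustrative use from user space (device and trigger names are examples):
 *
 *	$ cat /sys/bus/iio/devices/trigger0/name
 *	mytrig
 *	$ echo mytrig > /sys/bus/iio/devices/iio:device0/trigger/current_trigger
 */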

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i, NULL);
			irq_set_handler(trig->subirq_base + i, NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
				       struct module *this_mod,
				       const char *fmt,
				       va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.parent = parent;
	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);
	INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	INIT_LIST_HEAD(&trig->list);

	trig->owner = this_mod;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}

/**
 * __iio_trigger_alloc - Allocate a trigger
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
					struct module *this_mod,
					const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);
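
/*
 * Drivers normally call this through the iio_trigger_alloc() wrapper macro,
 * which supplies THIS_MODULE. Illustrative sketch (hypothetical names):
 *
 *	trig = iio_trigger_alloc(parent, "%s-trigger", dev_name(parent));
 *	if (!trig)
 *		return -ENOMEM;
 */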

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. An iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(parent, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);

static void devm_iio_trigger_unreg(void *trigger_info)
{
	iio_trigger_unregister(trigger_info);
}

/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register(). The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info)
{
	int ret;

	ret = iio_trigger_register(trig_info);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
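
/*
 * Illustrative sketch (hypothetical probe code): the devm_ variants tie both
 * the allocation and the registration to the lifetime of @dev, so no
 * explicit cleanup is needed on the error paths.
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
 *				      iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &my_trigger_ops;
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 */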

bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);
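
/*
 * Illustrative sketch (hypothetical handler code): consumer drivers can use
 * this in their buffer handler to skip work that is only needed when a
 * foreign trigger fired, e.g. re-reading a status register.
 *
 *	if (!iio_trigger_using_own(indio_dev))
 *		ret = my_read_status(st);
 */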

/**
 * iio_validate_own_trigger - Check if a trigger and IIO device belong to
 *			      the same device
 * @idev: the IIO device to check
 * @trig: the IIO trigger to check
 *
 * This function can be used as the validate_trigger callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig)
{
	if (idev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(iio_validate_own_trigger);
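
/*
 * Illustrative sketch: wired up on the device side through struct iio_info
 * (other callbacks omitted).
 *
 *	static const struct iio_info my_info = {
 *		.validate_trigger = iio_validate_own_trigger,
 *	};
 */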

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong
 *				     to the same device
 * @trig: The IIO trigger to check
 * @indio_dev: the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
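
/*
 * Illustrative sketch: the mirror-image hookup on the trigger side
 * ("my_set_trigger_state" is hypothetical; other callbacks omitted).
 *
 *	static const struct iio_trigger_ops my_trigger_ops = {
 *		.set_trigger_state = my_set_trigger_state,
 *		.validate_device = iio_trigger_validate_own_device,
 *	};
 */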

int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return iio_device_register_sysfs_group(indio_dev,
					       &iio_trigger_consumer_attr_group);
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}

int iio_device_suspend_triggering(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	guard(mutex)(&iio_dev_opaque->mlock);

	if (indio_dev->pollfunc && indio_dev->pollfunc->irq > 0)
		disable_irq(indio_dev->pollfunc->irq);

	return 0;
}
EXPORT_SYMBOL(iio_device_suspend_triggering);

int iio_device_resume_triggering(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	guard(mutex)(&iio_dev_opaque->mlock);

	if (indio_dev->pollfunc && indio_dev->pollfunc->irq > 0)
		enable_irq(indio_dev->pollfunc->irq);

	return 0;
}
EXPORT_SYMBOL(iio_device_resume_triggering);
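
/*
 * Illustrative sketch (hypothetical PM callbacks): pairing the two helpers
 * in a driver's system sleep hooks keeps the poll function IRQ quiet while
 * the device is suspended.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct iio_dev *indio_dev = dev_get_drvdata(dev);
 *
 *		return iio_device_suspend_triggering(indio_dev);
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct iio_dev *indio_dev = dev_get_drvdata(dev);
 *
 *		return iio_device_resume_triggering(indio_dev);
 *	}
 */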