// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details
 */
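
/*
 * Example (illustrative sketch): an opportunistic client of the public
 * channel pool follows the dmaengine_get()/dma_find_channel() pattern
 * described above. Error handling is elided and the work handed to the
 * channel is only hinted at.
 *
 *	struct dma_chan *chan;
 *
 *	dmaengine_get();
 *
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		// hand work to the channel via the device's prep/submit ops
 *	}
 *
 *	dmaengine_put();
 */
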
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

#include "dmaengine.h"

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
						   rootdir);
	if (IS_ERR(dma_dev->dbg_dev_root))
		dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
	debugfs_remove_recursive(dma_dev->dbg_dev_root);
	dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
				   chan->dbg_client_name ?: "in-use");

			if (chan->router)
				seq_printf(s, " (via router: %s)\n",
					   dev_name(chan->router->dev));
			else
				seq_puts(s, "\n");
		}
	}
}

static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	struct dma_device *dma_dev = NULL;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
		seq_printf(s, "dma%d (%s): number of channels: %u\n",
			   dma_dev->dev_id, dev_name(dma_dev->dev),
			   dma_dev->chancnt);

		if (dma_dev->dbg_summary_show)
			dma_dev->dbg_summary_show(s, dma_dev);
		else
			dmaengine_dbg_summary_show(s, dma_dev);

		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
			seq_puts(s, "\n");
	}
	mutex_unlock(&dma_list_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
	return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif	/* DEBUG_FS */

/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME	"slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sysfs_emit(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sysfs_emit(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sysfs_emit(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
 * @chan: DMA channel to test
 * @cpu: CPU index which the channel should be close to
 *
 * Returns true if the channel is in the same NUMA-node as the CPU.
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
 * @cap: capability to match
 * @cpu: CPU index which the channel should be close to
 *
 * If some channels are close to the given CPU, the one with the lowest
 * reference count is returned. Otherwise, CPU is ignored and only the
 * reference count is taken into account.
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.
 *
 * Must be called under dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}

/**
 * dma_chan_get - try to grab a DMA channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		chan->client_count++;
		return 0;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	chan->client_count++;

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a DMA channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex.
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
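
/*
 * Example (illustrative sketch): synchronously waiting on a memcpy descriptor
 * with dma_sync_wait(). The channel, DMA addresses, and length are assumed to
 * be set up by the caller; dmaengine_prep_dma_memcpy() and dmaengine_submit()
 * are the usual inline wrappers from linux/dmaengine.h.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, 0);
 *	if (!tx)
 *		return -ENOMEM;
 *
 *	cookie = dmaengine_submit(tx);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */
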
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether the device reports that it uses the generic slave
	 * capabilities; if not, it doesn't support any kind of slave
	 * capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->min_burst = device->min_burst;
	caps->max_burst = device->max_burst;
	caps->max_sg_burst = device->max_sg_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	/*
	 * DMA engine device might be configured with non-uniformly
	 * distributed slave capabilities per device channels. In this
	 * case the corresponding driver may provide the device_caps
	 * callback to override the generic capabilities with
	 * channel-specific ones.
	 */
	if (device->device_caps)
		device->device_caps(chan, caps);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
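
/*
 * Example (illustrative sketch): a client can query the slave capabilities
 * before relying on optional control operations such as pause or terminate.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (dma_get_slave_caps(chan, &caps))
 *		return -EINVAL;
 *	if (!caps.cmd_pause || !caps.cmd_terminate)
 *		return -EOPNOTSUPP;	// optional controls not provided
 */
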
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;
		int err;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
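
/*
 * Example (illustrative sketch): requesting an exclusive channel with a
 * capability mask and a filter function, via the dma_request_channel()
 * wrapper from linux/dmaengine.h. The filter callback and its match data
 * (my_filter, my_match_data) are placeholders.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// match one controller
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_match_data);
 *	if (!chan)
 *		return -ENODEV;
 */
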
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR(chan))
		return chan;
	if (!chan)
		return ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
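
/*
 * Example (illustrative sketch): a slave driver typically asks for a named
 * channel in its probe path and configures it before preparing transfers.
 * The "tx" channel name, the pdev pointer, and the config values are
 * placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_dma_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *
 *	if (dmaengine_slave_config(chan, &cfg)) {
 *		dma_release_channel(chan);
 *		return -EINVAL;
 *	}
 */
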
/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
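
/*
 * Example (illustrative sketch): requesting any channel that provides a given
 * capability, here memory-to-memory copy.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */
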
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		kfree(chan->name);
		chan->name = NULL;
		chan->slave = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting, we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan,
					       const char *name)
{
	int rc;

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)
		return -ENOMEM;
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		rc = -ENOMEM;
		goto err_free_local;
	}

	/*
	 * When the chan_id is a negative value, we are dynamically adding
	 * the channel. Otherwise we are statically enumerating it.
	 */
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
		rc = chan->chan_id;
		goto err_free_dev;
	}

	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->dev_id = device->dev_id;
	if (!name)
		dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id);
	else
		dev_set_name(&chan->dev->device, "%s", name);
	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out_ida;
	chan->client_count = 0;
	device->chancnt++;

	return 0;

err_out_ida:
	ida_free(&device->chan_ida, chan->chan_id);
err_free_dev:
	kfree(chan->dev);
err_free_local:
	free_percpu(chan->local);
	chan->local = NULL;
	return rc;
}

int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan,
				      const char *name)
{
	int rc;

	rc = __dma_async_device_channel_register(device, chan, name);
	if (rc < 0)
		return rc;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);

static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	if (chan->local == NULL)
		return;

	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: pointer to &struct dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback, which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
int dma_async_device_register(struct dma_device *device)
{
	int rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMAdevice must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

#define CHECK_CAP(_name, _type) \
{ \
	if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
		dev_err(device->dev, \
			"Device claims capability %s, but op is not defined\n", \
			__stringify(_type)); \
		return -EIO; \
	} \
}

	CHECK_CAP(dma_memcpy, DMA_MEMCPY);
	CHECK_CAP(dma_xor, DMA_XOR);
	CHECK_CAP(dma_xor_val, DMA_XOR_VAL);
	CHECK_CAP(dma_pq, DMA_PQ);
	CHECK_CAP(dma_pq_val, DMA_PQ_VAL);
	CHECK_CAP(dma_memset, DMA_MEMSET);
	CHECK_CAP(dma_interrupt, DMA_INTERRUPT);
	CHECK_CAP(dma_cyclic, DMA_CYCLIC);
	CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);

#undef CHECK_CAP

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	if (!device->device_release)
		dev_dbg(device->dev,
			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	rc = get_dma_id(device);
	if (rc != 0)
		return rc;

	ida_init(&device->chan_ida);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = __dma_async_device_channel_register(device, chan, NULL);
		if (rc < 0)
			goto err_out;
	}

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dmaengine_debug_register(device);

	return 0;

err_out:
	/* if we never registered a channel just release the ida */
	if (!device->chancnt) {
		ida_free(&dma_ida, device->dev_id);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
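
/*
 * Example (illustrative sketch): on the provider side a driver fills in a
 * struct dma_device, adds its channels, and then registers. The my_dma and
 * my_chan structures (my_chan embeds a struct dma_chan) and the my_* callbacks
 * are placeholders for driver code.
 *
 *	struct dma_device *dd = &my_dma->ddev;
 *	int ret;
 *
 *	dma_cap_zero(dd->cap_mask);
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->dev = &pdev->dev;
 *	dd->device_prep_dma_memcpy = my_prep_memcpy;
 *	dd->device_tx_status = my_tx_status;
 *	dd->device_issue_pending = my_issue_pending;
 *	dd->device_release = my_release;
 *
 *	INIT_LIST_HEAD(&dd->channels);
 *	list_add_tail(&my_chan->chan.device_node, &dd->channels);
 *
 *	ret = dma_async_device_register(dd);
 */
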
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmaenginem_async_device_unregister(void *device)
{
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: pointer to &struct dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	int ret;

	ret = dma_async_device_register(device);
	if (ret)
		return ret;

	return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
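
/*
 * Example (illustrative sketch): in a driver probe routine the managed
 * variant removes the need for an explicit unregister on the error and
 * remove paths.
 *
 *	ret = dmaenginem_async_device_register(&my_dma->ddev);
 *	if (ret)
 *		return ret;
 *	// no dma_async_device_unregister() needed in .remove()
 */
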
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
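
/*
 * Example (illustrative sketch): an offload user maps one source and one
 * destination page, records them in the unmap data, and drops the reference
 * once it is done with the descriptor. The page pointers are placeholders.
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *
 *	unmap->len = PAGE_SIZE;
 *	unmap->addr[0] = dma_map_page(dev, src_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *
 *	// ... hand unmap->addr[] to the prep routine ...
 *
 *	dmaengine_unmap_put(unmap);
 */
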
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
	/* Make sure that the metadata mode is not mixed */
	if (!desc->desc_metadata_mode) {
		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
			desc->desc_metadata_mode = mode;
		else
			return -ENOTSUPP;
	} else if (desc->desc_metadata_mode != mode) {
		return -EINVAL;
	}

	return 0;
}

int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->attach)
		return -ENOTSUPP;

	return desc->metadata_ops->attach(desc, data, len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);

void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
		return ERR_PTR(-ENOTSUPP);

	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
		return -ENOTSUPP;

	return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
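
/*
 * Example (illustrative sketch): a client using DESC_METADATA_CLIENT mode
 * attaches its metadata buffer to a previously prepared descriptor before
 * submitting it. The md_buf/md_len buffer is a placeholder.
 *
 *	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
 *	if (ret)
 *		return ret;
 *
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
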
/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/**
 * dma_run_dependencies - process dependent operations on the target channel
 * @tx: transaction with dependencies
 *
 * Helper routine for DMA drivers to process (start) dependent operations
 * on their target channel.
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected; in that case
	 * we will be called again as a result of processing the interrupt
	 * from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;

	err = class_register(&dma_devclass);
	if (!err)
		dmaengine_debugfs_init();

	return err;
}
arch_initcall(dma_bus_init);