ark-dma.c
  1. /*
2. * Arkmicro DMA driver
  3. *
  4. * Licensed under GPLv2 or later.
  5. */
  6. #include <linux/module.h>
  7. #include <linux/device.h>
  8. #include <linux/clk.h>
  9. #include <linux/pm_runtime.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/dmaengine.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/of.h>
  14. #include <linux/of_dma.h>
  15. #include <linux/bitops.h>
  16. #include <linux/delay.h>
  17. #include <linux/dmapool.h>
  18. #include <linux/err.h>
  19. #include <linux/init.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/mm.h>
  22. #include <linux/slab.h>
23. #include "dmaengine.h"	/* private dmaengine helpers (dma_cookie_*, callback helpers) */
  24. #include "ark-dma.h"
  25. #define DRV_NAME "dw_dmac"
  26. /*
  27. * This supports the Synopsys "DesignWare AHB Central DMA Controller",
  28. * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
  29. * of which use ARM any more). See the "Databook" from Synopsys for
  30. * information beyond what licensees probably provide.
  31. *
  32. * The driver has been tested with the Atmel AT32AP7000, which does not
  33. * support descriptor writeback.
  34. */
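/*
 * DWC_DEFAULT_CTLLO() builds the common CTL_LO value for a channel: the
 * source/destination burst sizes come from the slave config for slave
 * transfers (MSIZE_16 otherwise), block chaining via LLP is enabled on
 * both sides, and the AHB masters are picked from the slave data
 * according to the transfer direction.
 */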
  35. #define DWC_DEFAULT_CTLLO(_chan) ({ \
  36. struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
  37. struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
  38. bool _is_slave = is_slave_direction(_dwc->direction); \
  39. u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
  40. DW_DMA_MSIZE_16; \
  41. u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
  42. DW_DMA_MSIZE_16; \
  43. u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \
  44. _dwc->dws.p_master : _dwc->dws.m_master; \
  45. u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \
  46. _dwc->dws.p_master : _dwc->dws.m_master; \
  47. \
  48. (DWC_CTLL_DST_MSIZE(_dmsize) \
  49. | DWC_CTLL_SRC_MSIZE(_smsize) \
  50. | DWC_CTLL_LLP_D_EN \
  51. | DWC_CTLL_LLP_S_EN \
  52. | DWC_CTLL_DMS(_dms) \
  53. | DWC_CTLL_SMS(_sms)); \
  54. })
  55. /* The set of bus widths supported by the DMA controller */
  56. #define DW_DMA_BUSWIDTHS \
  57. BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
  58. BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
  59. BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
  60. BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
  61. /*----------------------------------------------------------------------*/
  62. static struct device *chan2dev(struct dma_chan *chan)
  63. {
  64. return &chan->dev->device;
  65. }
  66. static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
  67. {
  68. return to_dw_desc(dwc->active_list.next);
  69. }
  70. static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
  71. {
  72. struct dw_desc *desc = txd_to_dw_desc(tx);
  73. struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
  74. dma_cookie_t cookie;
  75. unsigned long flags;
  76. spin_lock_irqsave(&dwc->lock, flags);
  77. cookie = dma_cookie_assign(tx);
  78. /*
  79. * REVISIT: We should attempt to chain as many descriptors as
  80. * possible, perhaps even appending to those already submitted
  81. * for DMA. But this is hard to do in a race-free manner.
  82. */
  83. list_add_tail(&desc->desc_node, &dwc->queue);
  84. spin_unlock_irqrestore(&dwc->lock, flags);
  85. dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
  86. __func__, desc->txd.cookie);
  87. return cookie;
  88. }
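/*
 * Descriptor housekeeping: LLIs are allocated from dw->desc_pool with
 * GFP_ATOMIC so the prep callbacks may run in atomic context;
 * descs_allocated is only a debug counter. dwc_desc_put() returns a
 * descriptor and all of its chained children to the pool.
 */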
  89. static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
  90. {
  91. struct dw_dma *dw = to_dw_dma(dwc->chan.device);
  92. struct dw_desc *desc;
  93. dma_addr_t phys;
  94. desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
  95. if (!desc)
  96. return NULL;
  97. dwc->descs_allocated++;
  98. INIT_LIST_HEAD(&desc->tx_list);
  99. dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
  100. desc->txd.tx_submit = dwc_tx_submit;
  101. desc->txd.flags = DMA_CTRL_ACK;
  102. desc->txd.phys = phys;
  103. return desc;
  104. }
  105. static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
  106. {
  107. struct dw_dma *dw = to_dw_dma(dwc->chan.device);
  108. struct dw_desc *child, *_next;
  109. if (unlikely(!desc))
  110. return;
  111. list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
  112. list_del(&child->desc_node);
  113. dma_pool_free(dw->desc_pool, child, child->txd.phys);
  114. dwc->descs_allocated--;
  115. }
  116. dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
  117. dwc->descs_allocated--;
  118. }
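/*
 * One-time channel setup: program CFG_LO/CFG_HI with the priority,
 * handshake interface numbers and polarity taken from the slave data,
 * then unmask the XFER and ERROR interrupts for this channel. The
 * DW_DMA_IS_INITIALIZED flag makes this run only once per allocation.
 */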
  119. static void dwc_initialize(struct dw_dma_chan *dwc)
  120. {
  121. struct dw_dma *dw = to_dw_dma(dwc->chan.device);
  122. u32 cfghi = DWC_CFGH_FIFO_MODE;
  123. u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
  124. bool hs_polarity = dwc->dws.hs_polarity;
  125. if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
  126. return;
  127. //printk("#####Enabling channel interrupts (mask=0x%x)\n", dwc->mask);
  128. cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
  129. cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
  130. /* Set polarity of handshake interface */
  131. cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
  132. channel_writel(dwc, CFG_LO, cfglo);
  133. channel_writel(dwc, CFG_HI, cfghi);
  134. /* Enable interrupts */
  135. channel_set_bit(dw, MASK.XFER, dwc->mask);
  136. channel_set_bit(dw, MASK.ERROR, dwc->mask);
  137. set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
  138. }
  139. /*----------------------------------------------------------------------*/
  140. static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
  141. {
  142. dev_err(chan2dev(&dwc->chan),
  143. " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
  144. channel_readl(dwc, SAR),
  145. channel_readl(dwc, DAR),
  146. channel_readl(dwc, LLP),
  147. channel_readl(dwc, CTL_HI),
  148. channel_readl(dwc, CTL_LO));
  149. }
  150. static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
  151. {
  152. channel_clear_bit(dw, CH_EN, dwc->mask);
  153. while (dma_readl(dw, CH_EN) & dwc->mask)
  154. cpu_relax();
  155. }
  156. /*----------------------------------------------------------------------*/
  157. /* Perform single block transfer */
  158. static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
  159. struct dw_desc *desc)
  160. {
  161. struct dw_dma *dw = to_dw_dma(dwc->chan.device);
  162. u32 ctllo;
  163. /*
  164. * Software emulation of LLP mode relies on interrupts to continue
  165. * multi block transfer.
  166. */
  167. ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
  168. channel_writel(dwc, SAR, lli_read(desc, sar));
  169. channel_writel(dwc, DAR, lli_read(desc, dar));
  170. channel_writel(dwc, CTL_LO, ctllo);
  171. channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
  172. channel_set_bit(dw, CH_EN, dwc->mask);
  173. /* Move pointer to next descriptor */
  174. dwc->tx_node_active = dwc->tx_node_active->next;
  175. }
  176. /* Called with dwc->lock held and bh disabled */
  177. static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
  178. {
  179. struct dw_dma *dw = to_dw_dma(dwc->chan.device);
  180. u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
  181. unsigned long was_soft_llp;
  182. /* ASSERT: channel is idle */
  183. if (dma_readl(dw, CH_EN) & dwc->mask) {
  184. dev_err(chan2dev(&dwc->chan),
  185. "%s: BUG: Attempted to start non-idle channel\n",
  186. __func__);
  187. dwc_dump_chan_regs(dwc);
  188. /* The tasklet will hopefully advance the queue... */
  189. return;
  190. }
  191. if (dwc->nollp) {
  192. was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
  193. &dwc->flags);
  194. if (was_soft_llp) {
  195. dev_err(chan2dev(&dwc->chan),
  196. "BUG: Attempted to start new LLP transfer inside ongoing one\n");
  197. return;
  198. }
  199. dwc_initialize(dwc);
  200. first->residue = first->total_len;
  201. dwc->tx_node_active = &first->tx_list;
  202. /* Submit first block */
  203. dwc_do_single_block(dwc, first);
  204. return;
  205. }
  206. dwc_initialize(dwc);
  207. channel_writel(dwc, LLP, first->txd.phys | lms);
  208. channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
  209. channel_writel(dwc, CTL_HI, 0);
  210. channel_set_bit(dw, CH_EN, dwc->mask);
  211. }
  212. static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
  213. {
  214. struct dw_desc *desc;
  215. if (list_empty(&dwc->queue))
  216. return;
  217. list_move(dwc->queue.next, &dwc->active_list);
  218. desc = dwc_first_active(dwc);
  219. dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
  220. dwc_dostart(dwc, desc);
  221. }
  222. /*----------------------------------------------------------------------*/
  223. static void
  224. dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
  225. bool callback_required)
  226. {
  227. struct dma_async_tx_descriptor *txd = &desc->txd;
  228. struct dw_desc *child;
  229. unsigned long flags;
  230. struct dmaengine_desc_callback cb;
  231. dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
  232. spin_lock_irqsave(&dwc->lock, flags);
  233. dma_cookie_complete(txd);
  234. if (callback_required)
  235. dmaengine_desc_get_callback(txd, &cb);
  236. else
  237. memset(&cb, 0, sizeof(cb));
  238. /* async_tx_ack */
  239. list_for_each_entry(child, &desc->tx_list, desc_node)
  240. async_tx_ack(&child->txd);
  241. async_tx_ack(&desc->txd);
  242. dwc_desc_put(dwc, desc);
  243. spin_unlock_irqrestore(&dwc->lock, flags);
  244. dmaengine_desc_callback_invoke(&cb, NULL);
  245. }
  246. static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
  247. {
  248. struct dw_desc *desc, *_desc;
  249. LIST_HEAD(list);
  250. unsigned long flags;
  251. spin_lock_irqsave(&dwc->lock, flags);
  252. if (dma_readl(dw, CH_EN) & dwc->mask) {
  253. dev_err(chan2dev(&dwc->chan),
  254. "BUG: XFER bit set, but channel not idle!\n");
  255. /* Try to continue after resetting the channel... */
  256. dwc_chan_disable(dw, dwc);
  257. }
  258. /*
  259. * Submit queued descriptors ASAP, i.e. before we go through
  260. * the completed ones.
  261. */
  262. list_splice_init(&dwc->active_list, &list);
  263. dwc_dostart_first_queued(dwc);
  264. spin_unlock_irqrestore(&dwc->lock, flags);
  265. list_for_each_entry_safe(desc, _desc, &list, desc_node)
  266. dwc_descriptor_complete(dwc, desc, true);
  267. }
  268. /* Returns how many bytes were already received from source */
  269. static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
  270. {
  271. u32 ctlhi = channel_readl(dwc, CTL_HI);
  272. u32 ctllo = channel_readl(dwc, CTL_LO);
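	/*
	 * BLOCK_TS in CTL_HI is the number of completed source transfers;
	 * (ctllo >> 4 & 7) is the source transfer width field
	 * (DWC_CTLL_SRC_WIDTH), so bytes = transfers << width.
	 */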
  273. return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
  274. }
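/*
 * Scan the active list after an interrupt: if the XFER status bit is set,
 * everything submitted has completed (or, in soft-LLP mode, the next block
 * is kicked off by hand); otherwise find the LLI the controller is
 * currently fetching, refresh its residue with dwc_get_sent() and complete
 * every descriptor that precedes it.
 */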
  275. static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
  276. {
  277. dma_addr_t llp;
  278. struct dw_desc *desc, *_desc;
  279. struct dw_desc *child;
  280. u32 status_xfer;
  281. unsigned long flags;
  282. spin_lock_irqsave(&dwc->lock, flags);
  283. llp = channel_readl(dwc, LLP);
  284. status_xfer = dma_readl(dw, RAW.XFER);
  285. if (status_xfer & dwc->mask) {
  286. /* Everything we've submitted is done */
  287. dma_writel(dw, CLEAR.XFER, dwc->mask);
  288. if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
  289. struct list_head *head, *active = dwc->tx_node_active;
  290. /*
  291. * We are inside first active descriptor.
  292. * Otherwise something is really wrong.
  293. */
  294. desc = dwc_first_active(dwc);
  295. head = &desc->tx_list;
  296. if (active != head) {
  297. /* Update residue to reflect last sent descriptor */
  298. if (active == head->next)
  299. desc->residue -= desc->len;
  300. else
  301. desc->residue -= to_dw_desc(active->prev)->len;
  302. child = to_dw_desc(active);
  303. /* Submit next block */
  304. dwc_do_single_block(dwc, child);
  305. spin_unlock_irqrestore(&dwc->lock, flags);
  306. return;
  307. }
  308. /* We are done here */
  309. clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
  310. }
  311. spin_unlock_irqrestore(&dwc->lock, flags);
  312. dwc_complete_all(dw, dwc);
  313. return;
  314. }
  315. if (list_empty(&dwc->active_list)) {
  316. spin_unlock_irqrestore(&dwc->lock, flags);
  317. return;
  318. }
  319. if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
  320. dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
  321. spin_unlock_irqrestore(&dwc->lock, flags);
  322. return;
  323. }
  324. dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
  325. list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
  326. /* Initial residue value */
  327. desc->residue = desc->total_len;
329. /* Check the first descriptor's address */
  329. if (desc->txd.phys == DWC_LLP_LOC(llp)) {
  330. spin_unlock_irqrestore(&dwc->lock, flags);
  331. return;
  332. }
333. /* Check the first descriptor's llp */
  334. if (lli_read(desc, llp) == llp) {
  335. /* This one is currently in progress */
  336. desc->residue -= dwc_get_sent(dwc);
  337. spin_unlock_irqrestore(&dwc->lock, flags);
  338. return;
  339. }
  340. desc->residue -= desc->len;
  341. list_for_each_entry(child, &desc->tx_list, desc_node) {
  342. if (lli_read(child, llp) == llp) {
  343. /* Currently in progress */
  344. desc->residue -= dwc_get_sent(dwc);
  345. spin_unlock_irqrestore(&dwc->lock, flags);
  346. return;
  347. }
  348. desc->residue -= child->len;
  349. }
  350. /*
  351. * No descriptors so far seem to be in progress, i.e.
  352. * this one must be done.
  353. */
  354. spin_unlock_irqrestore(&dwc->lock, flags);
  355. dwc_descriptor_complete(dwc, desc, true);
  356. spin_lock_irqsave(&dwc->lock, flags);
  357. }
  358. dev_err(chan2dev(&dwc->chan),
  359. "BUG: All descriptors done, but channel not idle!\n");
  360. /* Try to continue after resetting the channel... */
  361. dwc_chan_disable(dw, dwc);
  362. dwc_dostart_first_queued(dwc);
  363. spin_unlock_irqrestore(&dwc->lock, flags);
  364. }
  365. static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
  366. {
  367. dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
  368. lli_read(desc, sar),
  369. lli_read(desc, dar),
  370. lli_read(desc, llp),
  371. lli_read(desc, ctlhi),
  372. lli_read(desc, ctllo));
  373. }
  374. static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
  375. {
  376. struct dw_desc *bad_desc;
  377. struct dw_desc *child;
  378. unsigned long flags;
  379. dwc_scan_descriptors(dw, dwc);
  380. spin_lock_irqsave(&dwc->lock, flags);
  381. /*
  382. * The descriptor currently at the head of the active list is
  383. * borked. Since we don't have any way to report errors, we'll
  384. * just have to scream loudly and try to carry on.
  385. */
  386. bad_desc = dwc_first_active(dwc);
  387. list_del_init(&bad_desc->desc_node);
  388. list_move(dwc->queue.next, dwc->active_list.prev);
  389. /* Clear the error flag and try to restart the controller */
  390. dma_writel(dw, CLEAR.ERROR, dwc->mask);
  391. if (!list_empty(&dwc->active_list))
  392. dwc_dostart(dwc, dwc_first_active(dwc));
  393. /*
  394. * WARN may seem harsh, but since this only happens
  395. * when someone submits a bad physical address in a
  396. * descriptor, we should consider ourselves lucky that the
  397. * controller flagged an error instead of scribbling over
  398. * random memory locations.
  399. */
  400. dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
  401. " cookie: %d\n", bad_desc->txd.cookie);
  402. dwc_dump_lli(dwc, bad_desc);
  403. list_for_each_entry(child, &bad_desc->tx_list, desc_node)
  404. dwc_dump_lli(dwc, child);
  405. spin_unlock_irqrestore(&dwc->lock, flags);
  406. /* Pretend the descriptor completed successfully */
  407. dwc_descriptor_complete(dwc, bad_desc, true);
  408. }
  409. /* --------------------- Cyclic DMA API extensions -------------------- */
  410. dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
  411. {
  412. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  413. return channel_readl(dwc, SAR);
  414. }
  415. EXPORT_SYMBOL(dw_dma_get_src_addr);
  416. dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
  417. {
  418. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  419. return channel_readl(dwc, DAR);
  420. }
  421. EXPORT_SYMBOL(dw_dma_get_dst_addr);
  422. /* Called with dwc->lock held and all DMAC interrupts disabled */
  423. static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
  424. u32 status_block, u32 status_err, u32 status_xfer)
  425. {
  426. unsigned long flags;
  427. if (status_block & dwc->mask) {
  428. void (*callback)(void *param);
  429. void *callback_param;
  430. dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
  431. channel_readl(dwc, LLP));
  432. dma_writel(dw, CLEAR.BLOCK, dwc->mask);
  433. callback = dwc->cdesc->period_callback;
  434. callback_param = dwc->cdesc->period_callback_param;
  435. if (callback)
  436. callback(callback_param);
  437. }
  438. /*
  439. * Error and transfer complete are highly unlikely, and will most
  440. * likely be due to a configuration error by the user.
  441. */
  442. if (unlikely(status_err & dwc->mask) ||
  443. unlikely(status_xfer & dwc->mask)) {
  444. unsigned int i;
  445. dev_err(chan2dev(&dwc->chan),
  446. "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
  447. status_xfer ? "xfer" : "error");
  448. spin_lock_irqsave(&dwc->lock, flags);
  449. dwc_dump_chan_regs(dwc);
  450. dwc_chan_disable(dw, dwc);
  451. /* Make sure DMA does not restart by loading a new list */
  452. channel_writel(dwc, LLP, 0);
  453. channel_writel(dwc, CTL_LO, 0);
  454. channel_writel(dwc, CTL_HI, 0);
  455. dma_writel(dw, CLEAR.BLOCK, dwc->mask);
  456. dma_writel(dw, CLEAR.ERROR, dwc->mask);
  457. dma_writel(dw, CLEAR.XFER, dwc->mask);
  458. for (i = 0; i < dwc->cdesc->periods; i++)
  459. dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
  460. spin_unlock_irqrestore(&dwc->lock, flags);
  461. }
  462. /* Re-enable interrupts */
  463. channel_set_bit(dw, MASK.BLOCK, dwc->mask);
  464. }
  465. /* ------------------------------------------------------------------------- */
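/*
 * Bottom half: the hard IRQ handler below only masks the channel
 * interrupts and schedules this tasklet. Here the raw XFER/BLOCK/ERROR
 * status is read and dispatched per channel (cyclic handling, error
 * recovery or a plain descriptor scan) before the interrupt masks are
 * restored.
 */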
  466. static void dw_dma_tasklet(unsigned long data)
  467. {
  468. struct dw_dma *dw = (struct dw_dma *)data;
  469. struct dw_dma_chan *dwc;
  470. u32 status_block;
  471. u32 status_xfer;
  472. u32 status_err;
  473. unsigned int i;
  474. status_block = dma_readl(dw, RAW.BLOCK);
  475. status_xfer = dma_readl(dw, RAW.XFER);
  476. status_err = dma_readl(dw, RAW.ERROR);
  477. dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
  478. for (i = 0; i < dw->dma.chancnt; i++) {
  479. dwc = &dw->chan[i];
480. if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
481. dwc_handle_cyclic(dw, dwc, status_block, status_err,
482. status_xfer);
483. } else if (status_err & (1 << i)) {
484. dwc_handle_error(dw, dwc);
485. } else if (status_xfer & (1 << i)) {
486. dwc_scan_descriptors(dw, dwc);
487. }
  488. }
  489. /* Re-enable interrupts */
  490. channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
  491. channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
  492. }
  493. static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
  494. {
  495. struct dw_dma *dw = dev_id;
  496. u32 status;
  497. /* Check if we have any interrupt from the DMAC which is not in use */
  498. if (!dw->in_use)
  499. return IRQ_NONE;
  500. status = dma_readl(dw, STATUS_INT);
  501. dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
  502. /* Check if we have any interrupt from the DMAC */
  503. if (!status)
  504. return IRQ_NONE;
  505. /*
  506. * Just disable the interrupts. We'll turn them back on in the
  507. * softirq handler.
  508. */
  509. channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
  510. channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
  511. channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
  512. status = dma_readl(dw, STATUS_INT);
  513. if (status) {
  514. dev_err(dw->dma.dev,
  515. "BUG: Unexpected interrupts pending: 0x%x\n",
  516. status);
  517. /* Try to recover */
  518. channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
  519. channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
  520. channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
  521. channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
  522. channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
  523. }
  524. tasklet_schedule(&dw->tasklet);
  525. return IRQ_HANDLED;
  526. }
  527. /*----------------------------------------------------------------------*/
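/*
 * Memory-to-memory prep: the transfer width is the largest power of two
 * that divides src, dest, len and the bus data width; the length is then
 * split into hardware blocks of at most dwc->block_size transfers, each
 * described by one LLI and chained through the llp field.
 */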
  528. static struct dma_async_tx_descriptor *
  529. dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  530. size_t len, unsigned long flags)
  531. {
  532. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  533. struct dw_dma *dw = to_dw_dma(chan->device);
  534. struct dw_desc *desc;
  535. struct dw_desc *first;
  536. struct dw_desc *prev;
  537. size_t xfer_count;
  538. size_t offset;
  539. u8 m_master = dwc->dws.m_master;
  540. unsigned int src_width;
  541. unsigned int dst_width;
  542. unsigned int data_width = dw->pdata->data_width[m_master];
  543. u32 ctllo;
  544. u8 lms = DWC_LLP_LMS(m_master);
  545. dev_vdbg(chan2dev(chan),
  546. "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
  547. &dest, &src, len, flags);
  548. if (unlikely(!len)) {
  549. dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
  550. return NULL;
  551. }
  552. dwc->direction = DMA_MEM_TO_MEM;
  553. src_width = dst_width = __ffs(data_width | src | dest | len);
  554. ctllo = DWC_DEFAULT_CTLLO(chan)
  555. | DWC_CTLL_DST_WIDTH(dst_width)
  556. | DWC_CTLL_SRC_WIDTH(src_width)
  557. | DWC_CTLL_DST_INC
  558. | DWC_CTLL_SRC_INC
  559. | DWC_CTLL_FC_M2M;
  560. prev = first = NULL;
  561. for (offset = 0; offset < len; offset += xfer_count << src_width) {
  562. xfer_count = min_t(size_t, (len - offset) >> src_width,
  563. dwc->block_size);
  564. desc = dwc_desc_get(dwc);
  565. if (!desc)
  566. goto err_desc_get;
  567. lli_write(desc, sar, src + offset);
  568. lli_write(desc, dar, dest + offset);
  569. lli_write(desc, ctllo, ctllo);
  570. lli_write(desc, ctlhi, xfer_count);
  571. desc->len = xfer_count << src_width;
  572. if (!first) {
  573. first = desc;
  574. } else {
  575. lli_write(prev, llp, desc->txd.phys | lms);
  576. list_add_tail(&desc->desc_node, &first->tx_list);
  577. }
  578. prev = desc;
  579. }
  580. if (flags & DMA_PREP_INTERRUPT)
  581. /* Trigger interrupt after last block */
  582. lli_set(prev, ctllo, DWC_CTLL_INT_EN);
  583. prev->lli.llp = 0;
  584. lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
  585. first->txd.flags = flags;
  586. first->total_len = len;
  587. return &first->txd;
  588. err_desc_get:
  589. dwc_desc_put(dwc, first);
  590. return NULL;
  591. }
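/*
 * Slave scatter/gather prep: one LLI per SG entry, split further whenever
 * an entry exceeds dwc->block_size transfers. The peripheral register
 * address stays fixed while the memory address increments, and the flow
 * controller is chosen from sconfig->device_fc.
 */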
  592. static struct dma_async_tx_descriptor *
  593. dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
  594. unsigned int sg_len, enum dma_transfer_direction direction,
  595. unsigned long flags, void *context)
  596. {
  597. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  598. struct dw_dma *dw = to_dw_dma(chan->device);
  599. struct dma_slave_config *sconfig = &dwc->dma_sconfig;
  600. struct dw_desc *prev;
  601. struct dw_desc *first;
  602. u32 ctllo;
  603. u8 m_master = dwc->dws.m_master;
  604. u8 lms = DWC_LLP_LMS(m_master);
  605. dma_addr_t reg;
  606. unsigned int reg_width;
  607. unsigned int mem_width;
  608. unsigned int data_width = dw->pdata->data_width[m_master];
  609. unsigned int i;
  610. struct scatterlist *sg;
  611. size_t total_len = 0;
  612. dev_vdbg(chan2dev(chan), "%s\n", __func__);
  613. if (unlikely(!is_slave_direction(direction) || !sg_len))
  614. return NULL;
  615. dwc->direction = direction;
  616. prev = first = NULL;
  617. switch (direction) {
  618. case DMA_MEM_TO_DEV:
  619. reg_width = __ffs(sconfig->dst_addr_width);
  620. reg = sconfig->dst_addr;
  621. ctllo = (DWC_DEFAULT_CTLLO(chan)
  622. | DWC_CTLL_DST_WIDTH(reg_width)
  623. | DWC_CTLL_DST_FIX
  624. | DWC_CTLL_SRC_INC);
  625. ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
  626. DWC_CTLL_FC(DW_DMA_FC_D_M2P);
  627. for_each_sg(sgl, sg, sg_len, i) {
  628. struct dw_desc *desc;
  629. u32 len, dlen, mem;
  630. mem = sg_dma_address(sg);
  631. len = sg_dma_len(sg);
  632. mem_width = __ffs(data_width | mem | len);
  633. slave_sg_todev_fill_desc:
  634. desc = dwc_desc_get(dwc);
  635. if (!desc)
  636. goto err_desc_get;
  637. lli_write(desc, sar, mem);
  638. lli_write(desc, dar, reg);
  639. lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
  640. if ((len >> mem_width) > dwc->block_size) {
  641. dlen = dwc->block_size << mem_width;
  642. mem += dlen;
  643. len -= dlen;
  644. } else {
  645. dlen = len;
  646. len = 0;
  647. }
  648. lli_write(desc, ctlhi, dlen >> mem_width);
  649. desc->len = dlen;
  650. if (!first) {
  651. first = desc;
  652. } else {
  653. lli_write(prev, llp, desc->txd.phys | lms);
  654. list_add_tail(&desc->desc_node, &first->tx_list);
  655. }
  656. prev = desc;
  657. total_len += dlen;
  658. if (len)
  659. goto slave_sg_todev_fill_desc;
  660. }
  661. break;
  662. case DMA_DEV_TO_MEM:
  663. reg_width = __ffs(sconfig->src_addr_width);
  664. reg = sconfig->src_addr;
  665. ctllo = (DWC_DEFAULT_CTLLO(chan)
  666. | DWC_CTLL_SRC_WIDTH(reg_width)
  667. | DWC_CTLL_DST_INC
  668. | DWC_CTLL_SRC_FIX);
  669. ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
  670. DWC_CTLL_FC(DW_DMA_FC_D_P2M);
  671. for_each_sg(sgl, sg, sg_len, i) {
  672. struct dw_desc *desc;
  673. u32 len, dlen, mem;
  674. mem = sg_dma_address(sg);
  675. len = sg_dma_len(sg);
  676. mem_width = __ffs(data_width | mem | len);
  677. slave_sg_fromdev_fill_desc:
  678. desc = dwc_desc_get(dwc);
  679. if (!desc)
  680. goto err_desc_get;
  681. lli_write(desc, sar, reg);
  682. lli_write(desc, dar, mem);
  683. lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
  684. if ((len >> reg_width) > dwc->block_size) {
  685. dlen = dwc->block_size << reg_width;
  686. mem += dlen;
  687. len -= dlen;
  688. } else {
  689. dlen = len;
  690. len = 0;
  691. }
  692. lli_write(desc, ctlhi, dlen >> reg_width);
  693. desc->len = dlen;
  694. if (!first) {
  695. first = desc;
  696. } else {
  697. lli_write(prev, llp, desc->txd.phys | lms);
  698. list_add_tail(&desc->desc_node, &first->tx_list);
  699. }
  700. prev = desc;
  701. total_len += dlen;
  702. if (len)
  703. goto slave_sg_fromdev_fill_desc;
  704. }
  705. break;
  706. default:
  707. return NULL;
  708. }
  709. if (flags & DMA_PREP_INTERRUPT)
  710. /* Trigger interrupt after last block */
  711. lli_set(prev, ctllo, DWC_CTLL_INT_EN);
  712. prev->lli.llp = 0;
  713. lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
  714. first->total_len = total_len;
  715. return &first->txd;
  716. err_desc_get:
  717. dev_err(chan2dev(chan),
  718. "not enough descriptors available. Direction %d\n", direction);
  719. dwc_desc_put(dwc, first);
  720. return NULL;
  721. }
  722. bool dw_dma_filter(struct dma_chan *chan, void *param)
  723. {
  724. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  725. struct dw_dma_slave *dws = param;
  726. if (dws->dma_dev != chan->device->dev)
  727. return false;
  728. /* We have to copy data since dws can be temporary storage */
  729. memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
  730. return true;
  731. }
  732. EXPORT_SYMBOL_GPL(dw_dma_filter);
  733. /*
  734. * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
  735. * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
  736. *
  737. * NOTE: burst size 2 is not supported by controller.
  738. *
739. * This can be done by finding the most significant bit set: fls(n) - 2.
  740. */
  741. static inline void convert_burst(u32 *maxburst)
  742. {
  743. if (*maxburst > 1)
  744. *maxburst = fls(*maxburst) - 2;
  745. else
  746. *maxburst = 0;
  747. }
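/*
 * Example: maxburst 16 -> fls(16) - 2 = 5 - 2 = 3, maxburst 4 -> 1,
 * maxburst 1 (or 0) -> 0, matching the table above.
 */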
  748. static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
  749. {
  750. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  751. /* Check if chan will be configured for slave transfers */
  752. if (!is_slave_direction(sconfig->direction))
  753. return -EINVAL;
  754. memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
  755. dwc->direction = sconfig->direction;
  756. convert_burst(&dwc->dma_sconfig.src_maxburst);
  757. convert_burst(&dwc->dma_sconfig.dst_maxburst);
  758. return 0;
  759. }
  760. static int dwc_pause(struct dma_chan *chan)
  761. {
  762. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  763. unsigned long flags;
  764. unsigned int count = 20; /* timeout iterations */
  765. u32 cfglo;
  766. spin_lock_irqsave(&dwc->lock, flags);
  767. cfglo = channel_readl(dwc, CFG_LO);
  768. channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
  769. while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
  770. udelay(2);
  771. set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
  772. spin_unlock_irqrestore(&dwc->lock, flags);
  773. return 0;
  774. }
  775. static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
  776. {
  777. u32 cfglo = channel_readl(dwc, CFG_LO);
  778. channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
  779. clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
  780. }
  781. static int dwc_resume(struct dma_chan *chan)
  782. {
  783. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  784. unsigned long flags;
  785. spin_lock_irqsave(&dwc->lock, flags);
  786. if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
  787. dwc_chan_resume(dwc);
  788. spin_unlock_irqrestore(&dwc->lock, flags);
  789. return 0;
  790. }
  791. static int dwc_terminate_all(struct dma_chan *chan)
  792. {
  793. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  794. struct dw_dma *dw = to_dw_dma(chan->device);
  795. struct dw_desc *desc, *_desc;
  796. unsigned long flags;
  797. LIST_HEAD(list);
  798. spin_lock_irqsave(&dwc->lock, flags);
  799. clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
  800. dwc_chan_disable(dw, dwc);
  801. dwc_chan_resume(dwc);
  802. /* active_list entries will end up before queued entries */
  803. list_splice_init(&dwc->queue, &list);
  804. list_splice_init(&dwc->active_list, &list);
  805. spin_unlock_irqrestore(&dwc->lock, flags);
  806. /* Flush all pending and queued descriptors */
  807. list_for_each_entry_safe(desc, _desc, &list, desc_node)
  808. dwc_descriptor_complete(dwc, desc, false);
  809. return 0;
  810. }
  811. static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
  812. {
  813. struct dw_desc *desc;
  814. list_for_each_entry(desc, &dwc->active_list, desc_node)
  815. if (desc->txd.cookie == c)
  816. return desc;
  817. return NULL;
  818. }
  819. static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
  820. {
  821. struct dw_desc *desc;
  822. unsigned long flags;
  823. u32 residue;
  824. spin_lock_irqsave(&dwc->lock, flags);
  825. desc = dwc_find_desc(dwc, cookie);
  826. if (desc) {
  827. if (desc == dwc_first_active(dwc)) {
  828. residue = desc->residue;
  829. if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
  830. residue -= dwc_get_sent(dwc);
  831. } else {
  832. residue = desc->total_len;
  833. }
  834. } else {
  835. residue = 0;
  836. }
  837. spin_unlock_irqrestore(&dwc->lock, flags);
  838. return residue;
  839. }
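/*
 * Residue reporting: only the first active descriptor carries a live
 * residue (kept up to date by dwc_scan_descriptors() and, for soft LLP,
 * reduced by what the current block has already sent); descriptors still
 * waiting behind it simply report their total length.
 */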
  840. static enum dma_status
  841. dwc_tx_status(struct dma_chan *chan,
  842. dma_cookie_t cookie,
  843. struct dma_tx_state *txstate)
  844. {
  845. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  846. enum dma_status ret;
  847. ret = dma_cookie_status(chan, cookie, txstate);
  848. if (ret == DMA_COMPLETE)
  849. return ret;
  850. dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
  851. ret = dma_cookie_status(chan, cookie, txstate);
  852. if (ret == DMA_COMPLETE)
  853. return ret;
  854. dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
  855. if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
  856. return DMA_PAUSED;
  857. return ret;
  858. }
  859. static void dwc_issue_pending(struct dma_chan *chan)
  860. {
  861. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  862. unsigned long flags;
  863. spin_lock_irqsave(&dwc->lock, flags);
  864. if (list_empty(&dwc->active_list))
  865. dwc_dostart_first_queued(dwc);
  866. spin_unlock_irqrestore(&dwc->lock, flags);
  867. }
  868. /*----------------------------------------------------------------------*/
  869. static void dw_dma_off(struct dw_dma *dw)
  870. {
  871. unsigned int i;
  872. dma_writel(dw, CFG, 0);
  873. channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
  874. channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
  875. channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
  876. channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
  877. channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
  878. while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
  879. cpu_relax();
  880. for (i = 0; i < dw->dma.chancnt; i++)
  881. clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
  882. }
  883. static void dw_dma_on(struct dw_dma *dw)
  884. {
  885. dma_writel(dw, CFG, DW_CFG_DMA_EN);
  886. }
  887. static int dwc_alloc_chan_resources(struct dma_chan *chan)
  888. {
  889. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  890. struct dw_dma *dw = to_dw_dma(chan->device);
  891. dev_vdbg(chan2dev(chan), "%s\n", __func__);
  892. /* ASSERT: channel is idle */
  893. if (dma_readl(dw, CH_EN) & dwc->mask) {
  894. dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
  895. return -EIO;
  896. }
  897. dma_cookie_init(chan);
  898. /*
  899. * NOTE: some controllers may have additional features that we
  900. * need to initialize here, like "scatter-gather" (which
  901. * doesn't mean what you think it means), and status writeback.
  902. */
  903. /*
  904. * We need controller-specific data to set up slave transfers.
  905. */
  906. if (chan->private && !dw_dma_filter(chan, chan->private)) {
  907. dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
  908. return -EINVAL;
  909. }
  910. /* Enable controller here if needed */
  911. if (!dw->in_use)
  912. dw_dma_on(dw);
  913. dw->in_use |= dwc->mask;
  914. return 0;
  915. }
  916. static void dwc_free_chan_resources(struct dma_chan *chan)
  917. {
  918. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  919. struct dw_dma *dw = to_dw_dma(chan->device);
  920. unsigned long flags;
  921. LIST_HEAD(list);
  922. dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
  923. dwc->descs_allocated);
  924. /* ASSERT: channel is idle */
  925. BUG_ON(!list_empty(&dwc->active_list));
  926. BUG_ON(!list_empty(&dwc->queue));
  927. BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
  928. spin_lock_irqsave(&dwc->lock, flags);
  929. /* Clear custom channel configuration */
  930. memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
  931. clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
  932. /* Disable interrupts */
  933. channel_clear_bit(dw, MASK.XFER, dwc->mask);
  934. channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
  935. channel_clear_bit(dw, MASK.ERROR, dwc->mask);
  936. spin_unlock_irqrestore(&dwc->lock, flags);
937. /* Disable the controller in case it was the last user */
  938. dw->in_use &= ~dwc->mask;
  939. if (!dw->in_use)
  940. dw_dma_off(dw);
  941. dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
  942. }
  943. /* --------------------- Cyclic DMA API extensions -------------------- */
  944. /**
  945. * dw_dma_cyclic_start - start the cyclic DMA transfer
  946. * @chan: the DMA channel to start
  947. *
  948. * Must be called with soft interrupts disabled. Returns zero on success or
  949. * -errno on failure.
  950. */
  951. int dw_dma_cyclic_start(struct dma_chan *chan)
  952. {
  953. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  954. struct dw_dma *dw = to_dw_dma(chan->device);
  955. unsigned long flags;
  956. if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
  957. dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
  958. return -ENODEV;
  959. }
  960. spin_lock_irqsave(&dwc->lock, flags);
  961. /* Enable interrupts to perform cyclic transfer */
  962. channel_set_bit(dw, MASK.BLOCK, dwc->mask);
  963. dwc_dostart(dwc, dwc->cdesc->desc[0]);
  964. spin_unlock_irqrestore(&dwc->lock, flags);
  965. return 0;
  966. }
  967. EXPORT_SYMBOL(dw_dma_cyclic_start);
  968. /**
  969. * dw_dma_cyclic_stop - stop the cyclic DMA transfer
  970. * @chan: the DMA channel to stop
  971. *
  972. * Must be called with soft interrupts disabled.
  973. */
  974. void dw_dma_cyclic_stop(struct dma_chan *chan)
  975. {
  976. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  977. struct dw_dma *dw = to_dw_dma(dwc->chan.device);
  978. unsigned long flags;
  979. spin_lock_irqsave(&dwc->lock, flags);
  980. dwc_chan_disable(dw, dwc);
  981. spin_unlock_irqrestore(&dwc->lock, flags);
  982. }
  983. EXPORT_SYMBOL(dw_dma_cyclic_stop);
  984. /**
  985. * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
  986. * @chan: the DMA channel to prepare
  987. * @buf_addr: physical DMA address where the buffer starts
  988. * @buf_len: total number of bytes for the entire buffer
  989. * @period_len: number of bytes for each period
  990. * @direction: transfer direction, to or from device
  991. *
  992. * Must be called before trying to start the transfer. Returns a valid struct
  993. * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
  994. */
  995. struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
  996. dma_addr_t buf_addr, size_t buf_len, size_t period_len,
  997. enum dma_transfer_direction direction)
  998. {
  999. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  1000. struct dma_slave_config *sconfig = &dwc->dma_sconfig;
  1001. struct dw_cyclic_desc *cdesc;
  1002. struct dw_cyclic_desc *retval = NULL;
  1003. struct dw_desc *desc;
  1004. struct dw_desc *last = NULL;
  1005. u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
  1006. unsigned long was_cyclic;
  1007. unsigned int reg_width;
  1008. unsigned int periods;
  1009. unsigned int i;
  1010. unsigned long flags;
  1011. spin_lock_irqsave(&dwc->lock, flags);
  1012. if (dwc->nollp) {
  1013. spin_unlock_irqrestore(&dwc->lock, flags);
  1014. dev_dbg(chan2dev(&dwc->chan),
  1015. "channel doesn't support LLP transfers\n");
  1016. return ERR_PTR(-EINVAL);
  1017. }
  1018. if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
  1019. spin_unlock_irqrestore(&dwc->lock, flags);
  1020. dev_dbg(chan2dev(&dwc->chan),
  1021. "queue and/or active list are not empty\n");
  1022. return ERR_PTR(-EBUSY);
  1023. }
  1024. was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
  1025. spin_unlock_irqrestore(&dwc->lock, flags);
  1026. if (was_cyclic) {
  1027. dev_dbg(chan2dev(&dwc->chan),
  1028. "channel already prepared for cyclic DMA\n");
  1029. return ERR_PTR(-EBUSY);
  1030. }
  1031. retval = ERR_PTR(-EINVAL);
1032. if (unlikely(!is_slave_direction(direction)))
1033. goto out_err;
1034. dwc->direction = direction;
1035. if (direction == DMA_MEM_TO_DEV)
1036. reg_width = __ffs(sconfig->dst_addr_width);
1037. else
1038. reg_width = __ffs(sconfig->src_addr_width);
1039. periods = buf_len / period_len;
1040. /* Check for too big/unaligned periods and unaligned DMA buffer. */
1041. if (period_len > (dwc->block_size << reg_width))
1042. goto out_err;
1043. if (unlikely(period_len & ((1 << reg_width) - 1)))
1044. goto out_err;
1045. if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1046. goto out_err;
  1047. retval = ERR_PTR(-ENOMEM);
  1048. cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
  1049. if (!cdesc)
  1050. goto out_err;
  1051. cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
  1052. if (!cdesc->desc)
  1053. goto out_err_alloc;
  1054. for (i = 0; i < periods; i++) {
  1055. desc = dwc_desc_get(dwc);
  1056. if (!desc)
  1057. goto out_err_desc_get;
  1058. switch (direction) {
  1059. case DMA_MEM_TO_DEV:
  1060. lli_write(desc, dar, sconfig->dst_addr);
  1061. lli_write(desc, sar, buf_addr + period_len * i);
  1062. lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
  1063. | DWC_CTLL_DST_WIDTH(reg_width)
  1064. | DWC_CTLL_SRC_WIDTH(reg_width)
  1065. | DWC_CTLL_DST_FIX
  1066. | DWC_CTLL_SRC_INC
  1067. | DWC_CTLL_INT_EN));
  1068. lli_set(desc, ctllo, sconfig->device_fc ?
  1069. DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
  1070. DWC_CTLL_FC(DW_DMA_FC_D_M2P));
  1071. break;
  1072. case DMA_DEV_TO_MEM:
  1073. lli_write(desc, dar, buf_addr + period_len * i);
  1074. lli_write(desc, sar, sconfig->src_addr);
  1075. lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
  1076. | DWC_CTLL_SRC_WIDTH(reg_width)
  1077. | DWC_CTLL_DST_WIDTH(reg_width)
  1078. | DWC_CTLL_DST_INC
  1079. | DWC_CTLL_SRC_FIX
  1080. | DWC_CTLL_INT_EN));
  1081. lli_set(desc, ctllo, sconfig->device_fc ?
  1082. DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
  1083. DWC_CTLL_FC(DW_DMA_FC_D_P2M));
  1084. break;
  1085. default:
  1086. break;
  1087. }
  1088. lli_write(desc, ctlhi, period_len >> reg_width);
  1089. cdesc->desc[i] = desc;
  1090. if (last)
  1091. lli_write(last, llp, desc->txd.phys | lms);
  1092. last = desc;
  1093. }
  1094. /* Let's make a cyclic list */
  1095. lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
  1096. dev_dbg(chan2dev(&dwc->chan),
  1097. "cyclic prepared buf %pad len %zu period %zu periods %d\n",
  1098. &buf_addr, buf_len, period_len, periods);
  1099. cdesc->periods = periods;
  1100. dwc->cdesc = cdesc;
  1101. return cdesc;
  1102. out_err_desc_get:
  1103. while (i--)
  1104. dwc_desc_put(dwc, cdesc->desc[i]);
  1105. out_err_alloc:
  1106. kfree(cdesc);
  1107. out_err:
  1108. clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
  1109. return (struct dw_cyclic_desc *)retval;
  1110. }
  1111. EXPORT_SYMBOL(dw_dma_cyclic_prep);
  1112. /**
  1113. * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
  1114. * @chan: the DMA channel to free
  1115. */
  1116. void dw_dma_cyclic_free(struct dma_chan *chan)
  1117. {
  1118. struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
  1119. struct dw_dma *dw = to_dw_dma(dwc->chan.device);
  1120. struct dw_cyclic_desc *cdesc = dwc->cdesc;
  1121. unsigned int i;
  1122. unsigned long flags;
  1123. dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
  1124. if (!cdesc)
  1125. return;
  1126. spin_lock_irqsave(&dwc->lock, flags);
  1127. dwc_chan_disable(dw, dwc);
  1128. dma_writel(dw, CLEAR.BLOCK, dwc->mask);
  1129. dma_writel(dw, CLEAR.ERROR, dwc->mask);
  1130. dma_writel(dw, CLEAR.XFER, dwc->mask);
  1131. spin_unlock_irqrestore(&dwc->lock, flags);
  1132. for (i = 0; i < cdesc->periods; i++)
  1133. dwc_desc_put(dwc, cdesc->desc[i]);
  1134. kfree(cdesc->desc);
  1135. kfree(cdesc);
  1136. dwc->cdesc = NULL;
  1137. clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
  1138. }
  1139. EXPORT_SYMBOL(dw_dma_cyclic_free);
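/*
 * Typical cyclic usage (sketch only; assumes the channel was obtained
 * with dma_request_channel() and configured via dmaengine_slave_config()):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */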
  1140. /*----------------------------------------------------------------------*/
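/*
 * Controller probe: when no platform data is supplied, the channel count,
 * number of masters, data widths and maximum block size are read back
 * from DW_PARAMS/MAX_BLK_SIZE; a dma_pool is created for the LLIs, the
 * shared IRQ is requested, each channel is initialised and the dmaengine
 * device is registered. The controller is deliberately not reset here
 * because channel 0 may still be streaming audio set up by U-Boot.
 */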
  1141. static int dw_dma_probe(struct dw_dma_chip *chip)
  1142. {
  1143. struct dw_dma_platform_data *pdata;
  1144. struct dw_dma *dw;
  1145. bool autocfg = false;
  1146. unsigned int dw_params;
  1147. unsigned int i;
  1148. int err;
  1149. dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
  1150. if (!dw)
  1151. return -ENOMEM;
  1152. dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
  1153. if (!dw->pdata)
  1154. return -ENOMEM;
  1155. dw->regs = chip->regs;
  1156. chip->dw = dw;
  1157. pm_runtime_get_sync(chip->dev);
  1158. if (!chip->pdata) {
  1159. dw_params = dma_readl(dw, DW_PARAMS);
  1160. dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
  1161. autocfg = dw_params >> DW_PARAMS_EN & 1;
  1162. if (!autocfg) {
  1163. err = -EINVAL;
  1164. goto err_pdata;
  1165. }
  1166. /* Reassign the platform data pointer */
  1167. pdata = dw->pdata;
  1168. /* Get hardware configuration parameters */
  1169. pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
  1170. pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
  1171. for (i = 0; i < pdata->nr_masters; i++) {
  1172. pdata->data_width[i] =
  1173. 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
  1174. }
  1175. pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
  1176. /* Fill platform data with the default values */
  1177. pdata->is_private = true;
  1178. pdata->is_memcpy = true;
  1179. pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
  1180. pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
  1181. } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
  1182. err = -EINVAL;
  1183. goto err_pdata;
  1184. } else {
  1185. memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
  1186. /* Reassign the platform data pointer */
  1187. pdata = dw->pdata;
  1188. }
  1189. dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
  1190. GFP_KERNEL);
  1191. if (!dw->chan) {
  1192. err = -ENOMEM;
  1193. goto err_pdata;
  1194. }
  1195. /* Calculate all channel mask before DMA setup */
  1196. dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
  1197. /* Force dma off, just in case */
  1198. //dw_dma_off(dw); //dma is in use to play audio in u-boot
  1199. /* Create a pool of consistent memory blocks for hardware descriptors */
  1200. dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
  1201. sizeof(struct dw_desc), 4, 0);
  1202. if (!dw->desc_pool) {
  1203. dev_err(chip->dev, "No memory for descriptors dma pool\n");
  1204. err = -ENOMEM;
  1205. goto err_pdata;
  1206. }
  1207. tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
  1208. err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
  1209. "dw_dmac", dw);
  1210. if (err)
  1211. goto err_pdata;
  1212. INIT_LIST_HEAD(&dw->dma.channels);
  1213. for (i = 0; i < pdata->nr_channels; i++) {
  1214. struct dw_dma_chan *dwc = &dw->chan[i];
  1215. dwc->chan.device = &dw->dma;
  1216. dma_cookie_init(&dwc->chan);
  1217. if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
  1218. list_add_tail(&dwc->chan.device_node,
  1219. &dw->dma.channels);
  1220. else
  1221. list_add(&dwc->chan.device_node, &dw->dma.channels);
  1222. /* 7 is highest priority & 0 is lowest. */
  1223. if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
  1224. dwc->priority = pdata->nr_channels - i - 1;
  1225. else
  1226. dwc->priority = i;
  1227. dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
  1228. spin_lock_init(&dwc->lock);
  1229. dwc->mask = 1 << i;
  1230. INIT_LIST_HEAD(&dwc->active_list);
  1231. INIT_LIST_HEAD(&dwc->queue);
  1232. if (i != 0)
  1233. channel_clear_bit(dw, CH_EN, dwc->mask); /* ch0 used in u-boot */
  1234. dwc->direction = DMA_TRANS_NONE;
  1235. /* Hardware configuration */
  1236. if (autocfg) {
  1237. unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
  1238. void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
  1239. unsigned int dwc_params = dma_readl_native(addr);
  1240. dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
  1241. dwc_params);
  1242. /*
  1243. * Decode maximum block size for given channel. The
  1244. * stored 4 bit value represents blocks from 0x00 for 3
  1245. * up to 0x0a for 4095.
  1246. */
  1247. dwc->block_size =
  1248. (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
  1249. dwc->nollp =
  1250. (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
  1251. } else {
  1252. dwc->block_size = pdata->block_size;
  1253. dwc->nollp = pdata->is_nollp;
  1254. }
  1255. }
  1256. /* Clear all interrupts on all channels. */
  1257. dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
  1258. dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
  1259. dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
  1260. dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
  1261. dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
  1262. /* Set capabilities */
  1263. dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
  1264. if (pdata->is_private)
  1265. dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
  1266. if (pdata->is_memcpy)
  1267. dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
  1268. dw->dma.dev = chip->dev;
  1269. dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
  1270. dw->dma.device_free_chan_resources = dwc_free_chan_resources;
  1271. dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
  1272. dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
  1273. dw->dma.device_config = dwc_config;
  1274. dw->dma.device_pause = dwc_pause;
  1275. dw->dma.device_resume = dwc_resume;
  1276. dw->dma.device_terminate_all = dwc_terminate_all;
  1277. dw->dma.device_tx_status = dwc_tx_status;
  1278. dw->dma.device_issue_pending = dwc_issue_pending;
  1279. /* DMA capabilities */
  1280. dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
  1281. dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
  1282. dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
  1283. BIT(DMA_MEM_TO_MEM);
  1284. dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
  1285. err = dma_async_device_register(&dw->dma);
  1286. if (err)
  1287. goto err_dma_register;
  1288. dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
  1289. pdata->nr_channels);
  1290. pm_runtime_put_sync_suspend(chip->dev);
  1291. return 0;
  1292. err_dma_register:
  1293. free_irq(chip->irq, dw);
  1294. err_pdata:
  1295. pm_runtime_put_sync_suspend(chip->dev);
  1296. return err;
  1297. }
  1298. static int dw_dma_remove(struct dw_dma_chip *chip)
  1299. {
  1300. struct dw_dma *dw = chip->dw;
  1301. struct dw_dma_chan *dwc, *_dwc;
  1302. pm_runtime_get_sync(chip->dev);
  1303. dw_dma_off(dw);
  1304. dma_async_device_unregister(&dw->dma);
  1305. free_irq(chip->irq, dw);
  1306. tasklet_kill(&dw->tasklet);
  1307. list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
  1308. chan.device_node) {
  1309. list_del(&dwc->chan.device_node);
  1310. channel_clear_bit(dw, CH_EN, dwc->mask);
  1311. }
  1312. pm_runtime_put_sync_suspend(chip->dev);
  1313. return 0;
  1314. }

int dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_off(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_disable);

int dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_on(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_enable);
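
/*
 * Translate a device tree DMA specifier into a channel. The binding uses
 * three cells: the hardware request line (programmed as both src_id and
 * dst_id), the memory-side master and the peripheral-side master. An
 * illustrative client node (phandle name and cell values are examples
 * only, not taken from a real board) could look like:
 *
 *	dmas = <&dma 5 0 1>, <&dma 6 0 1>;
 *	dma-names = "tx", "rx";
 */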
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_OF
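/*
 * Illustrative controller node for this parser (property values are
 * examples only, not taken from a real board):
 *
 *	dma: dma-controller@... {
 *		compatible = "arkmicro,ark-dma";
 *		#dma-cells = <3>;
 *		dma-channels = <6>;
 *		dma-masters = <2>;
 *		chan_allocation_order = <1>;
 *		chan_priority = <1>;
 *		block_size = <0xfff>;
 *		data-width = <4 4>;
 *	};
 *
 * "dma-masters" and "dma-channels" are mandatory; the rest are optional.
 * The legacy "data_width" spelling is still accepted and is interpreted
 * as an encoded power-of-two width.
 */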
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif
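
/*
 * Platform glue: pick up the IRQ, MMIO region and 32-bit DMA mask, take
 * platform data (falling back to device tree parsing), enable the "hclk"
 * clock and runtime PM, then hand off to dw_dma_probe() and register the
 * controller as an OF DMA provider.
 */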
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

static void dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that because the DMA device is already
	 * powered off, and there is no way to check whether the platform is
	 * affected. That's why pm_runtime_get_sync() /
	 * pm_runtime_put_sync_suspend() are called unconditionally here. We
	 * can't rely on pm_runtime_suspended() either, because runtime PM is
	 * not fully used by this driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "arkmicro,ark-dma" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif
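
/*
 * System sleep support: the controller is shut down late in suspend and
 * brought back early in resume, wired up below via
 * SET_LATE_SYSTEM_SLEEP_PM_OPS().
 */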
#ifdef CONFIG_PM_SLEEP

static int dw_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static int dw_resume_early(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(chip->clk);
	if (ret)
		return ret;

	return dw_dma_enable(chip);
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};

static struct platform_driver dw_driver = {
	.probe = dw_probe,
	.remove = dw_remove,
	.shutdown = dw_shutdown,
	.driver = {
		.name = DRV_NAME,
		.pm = &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_AUTHOR("Sim");
MODULE_DESCRIPTION("Arkmicro DMA driver");
MODULE_LICENSE("GPL v2");