ark-axi-dma.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. // (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
  3. /*
  4. * Synopsys DesignWare AXI DMA Controller driver.
  5. *
  6. * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
  7. */
  8. #include <linux/bitops.h>
  9. #include <linux/delay.h>
  10. #include <linux/device.h>
  11. #include <linux/dmaengine.h>
  12. #include <linux/dmapool.h>
  13. #include <linux/err.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/io.h>
  16. #include <linux/iopoll.h>
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/of.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/pm_runtime.h>
  22. #include <linux/property.h>
  23. #include <linux/types.h>
  24. #include <linux/of_dma.h>
  25. #include "ark-axi-dma.h"
  26. #include "dmaengine.h"
  27. #include "virt-dma.h"
  28. /*
  29. * This supports the Synopsys "DesignWare AXI DMA Controller"
  30. * (DW_axi_dmac), a multi-channel DMA engine with one or two AXI
  31. * master interfaces. See the "Databook" from Synopsys for
  32. * information beyond what licensees probably provide.
  33. *
  34. * Transfers are described by hardware linked-list items (LLIs)
  35. * allocated from a DMA pool and chained through the CH_LLP register.
  36. */
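/*
 * DWC_DEFAULT_CTLLO() builds the default CH_CTL low-word bits for a
 * channel: the source/destination burst sizes (taken from the channel's
 * dma_slave_config for slave directions, otherwise a 16 data item
 * burst) and the AXI master used on each side - the peripheral master
 * for the device end, the memory master for the memory end - based on
 * the direction stored in the channel.
 */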
  37. #define DWC_DEFAULT_CTLLO(_chan) ({ \
  38. struct axi_dma_chan *_dwc = dchan_to_axi_dma_chan(_chan); \
  39. struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
  40. bool _is_slave = is_slave_direction(_dwc->direction); \
  41. u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
  42. DWAXIDMAC_BURST_TRANS_LEN_16; \
  43. u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
  44. DWAXIDMAC_BURST_TRANS_LEN_16; \
  45. u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \
  46. _dwc->dws.p_master : _dwc->dws.m_master; \
  47. u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \
  48. _dwc->dws.p_master : _dwc->dws.m_master; \
  49. \
  50. (DWC_CTLL_DST_MSIZE(_dmsize) \
  51. | DWC_CTLL_SRC_MSIZE(_smsize) \
  52. | DWC_CTLL_DMS(_dms) \
  53. | DWC_CTLL_SMS(_sms)); \
  54. })
  55. /*
  56. * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
  57. * master data bus width up to 512 bits (for both AXI master interfaces), but
  58. * it depends on IP block configuration.
  59. */
  60. #define AXI_DMA_BUSWIDTHS \
  61. (DMA_SLAVE_BUSWIDTH_1_BYTE | \
  62. DMA_SLAVE_BUSWIDTH_2_BYTES | \
  63. DMA_SLAVE_BUSWIDTH_4_BYTES | \
  64. DMA_SLAVE_BUSWIDTH_8_BYTES | \
  65. DMA_SLAVE_BUSWIDTH_16_BYTES | \
  66. DMA_SLAVE_BUSWIDTH_32_BYTES | \
  67. DMA_SLAVE_BUSWIDTH_64_BYTES)
  68. static inline void
  69. axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
  70. {
  71. iowrite32(val, chip->regs + reg);
  72. }
  73. static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
  74. {
  75. return ioread32(chip->regs + reg);
  76. }
  77. static inline void
  78. axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
  79. {
  80. iowrite32(val, chan->chan_regs + reg);
  81. }
  82. static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
  83. {
  84. return ioread32(chan->chan_regs + reg);
  85. }
  86. static inline void
  87. axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
  88. {
  89. /*
  90. * We split one 64 bit write into two 32 bit writes because some HW
  91. * doesn't support 64 bit access.
  92. */
  93. iowrite32(lower_32_bits(val), chan->chan_regs + reg);
  94. iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
  95. }
  96. static inline void axi_dma_disable(struct axi_dma_chip *chip)
  97. {
  98. u32 val;
  99. val = axi_dma_ioread32(chip, DMAC_CFG);
  100. val &= ~DMAC_EN_MASK;
  101. axi_dma_iowrite32(chip, DMAC_CFG, val);
  102. }
  103. static inline void axi_dma_enable(struct axi_dma_chip *chip)
  104. {
  105. u32 val;
  106. val = axi_dma_ioread32(chip, DMAC_CFG);
  107. val |= DMAC_EN_MASK;
  108. axi_dma_iowrite32(chip, DMAC_CFG, val);
  109. }
  110. static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
  111. {
  112. u32 val;
  113. val = axi_dma_ioread32(chip, DMAC_CFG);
  114. val &= ~INT_EN_MASK;
  115. axi_dma_iowrite32(chip, DMAC_CFG, val);
  116. }
  117. static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
  118. {
  119. u32 val;
  120. val = axi_dma_ioread32(chip, DMAC_CFG);
  121. val |= INT_EN_MASK;
  122. axi_dma_iowrite32(chip, DMAC_CFG, val);
  123. }
  124. static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
  125. {
  126. u32 val;
  127. if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
  128. axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
  129. } else {
  130. val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
  131. val &= ~irq_mask;
  132. axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
  133. }
  134. }
  135. static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
  136. {
  137. axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
  138. }
  139. static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
  140. {
  141. axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
  142. }
  143. static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
  144. {
  145. axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
  146. }
  147. static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
  148. {
  149. u32 val;
  150. val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
  151. return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
  152. }
  153. static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
  154. {
  155. return axi_chan_ioread32(chan, CH_INTSTATUS);
  156. }
  157. static inline void axi_chan_disable(struct axi_dma_chan *chan)
  158. {
  159. u32 val;
  160. val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
  161. val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
  162. val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
  163. axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
  164. }
  165. static inline void axi_chan_enable(struct axi_dma_chan *chan)
  166. {
  167. u32 val;
  168. val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
  169. val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
  170. BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
  171. axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
  172. }
  173. static void axi_dma_hw_init(struct axi_dma_chip *chip)
  174. {
  175. u32 i;
  176. for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
  177. axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
  178. axi_chan_disable(&chip->dw->chan[i]);
  179. }
  180. }
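/*
 * Pick the widest transfer width (expressed as log2 of the byte count)
 * that keeps the source address, destination address and length all
 * aligned, capped at the AXI master data bus width: __ffs() of their OR
 * with BIT(max_width) returns the lowest set bit, i.e. the limiting
 * alignment.
 */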
  181. static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
  182. dma_addr_t dst, size_t len)
  183. {
  184. u32 max_width = chan->chip->dw->hdata->m_data_width;
  185. return __ffs(src | dst | len | BIT(max_width));
  186. }
  187. static inline const char *axi_chan_name(struct axi_dma_chan *chan)
  188. {
  189. return dma_chan_name(&chan->vc.chan);
  190. }
  191. static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
  192. {
  193. struct dw_axi_dma *dw = chan->chip->dw;
  194. struct axi_dma_desc *desc;
  195. dma_addr_t phys;
  196. desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
  197. if (unlikely(!desc)) {
  198. dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
  199. axi_chan_name(chan));
  200. return NULL;
  201. }
  202. atomic_inc(&chan->descs_allocated);
  203. INIT_LIST_HEAD(&desc->xfer_list);
  204. desc->vd.tx.phys = phys;
  205. desc->chan = chan;
  206. return desc;
  207. }
  208. static void axi_desc_put(struct axi_dma_desc *desc)
  209. {
  210. struct axi_dma_chan *chan = desc->chan;
  211. struct dw_axi_dma *dw = chan->chip->dw;
  212. struct axi_dma_desc *child, *_next;
  213. unsigned int descs_put = 0;
  214. list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
  215. list_del(&child->xfer_list);
  216. dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
  217. descs_put++;
  218. }
  219. dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
  220. descs_put++;
  221. atomic_sub(descs_put, &chan->descs_allocated);
  222. dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
  223. axi_chan_name(chan), descs_put,
  224. atomic_read(&chan->descs_allocated));
  225. }
  226. static void vchan_desc_put(struct virt_dma_desc *vdesc)
  227. {
  228. axi_desc_put(vd_to_axi_desc(vdesc));
  229. }
  230. /* Returns how many bytes of the current block have already been transferred from the source */
  231. static inline u32 dma_chan_get_sent(struct axi_dma_chan *chan)
  232. {
  233. u32 block_ts = axi_chan_ioread32(chan, CH_STATUS);
  234. u32 ctllo = axi_chan_ioread32(chan, CH_CTL_L);
  236. return (block_ts & DMAX_MAX_BLK_MASK) * (1 << (ctllo >> CH_CTL_L_SRC_WIDTH_POS & 7));
  237. }
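/*
 * Residue for the descriptor chain at the head of the vc list: start
 * from the total transfer length, walk the LLI chain subtracting every
 * block the hardware has already moved past (found by comparing the
 * channel's current CH_LLP pointer with each descriptor's llp), and
 * finally subtract what has been sent within the in-flight block.
 */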
  238. static u32 dma_chan_get_residue(struct axi_dma_chan *chan, dma_cookie_t cookie)
  239. {
  240. struct axi_dma_desc *first, *desc;
  241. struct virt_dma_desc *vd;
  242. unsigned long flags;
  243. u32 residue = 0;
  244. u32 llplo, llphi;
  245. u64 llp;
  246. llplo = axi_chan_ioread32(chan, CH_LLP);
  247. llphi = axi_chan_ioread32(chan, CH_LLP + 4);
  248. llp = ((u64)llphi << 32) | llplo;
  249. spin_lock_irqsave(&chan->vc.lock, flags);
  250. vd = vchan_next_desc(&chan->vc);
  251. if (vd) {
  252. first = vd_to_axi_desc(vd);
  253. residue = first->total_len;
  254. if (llp == DWC_LLP_LOC(first->lli.llp)) {
  255. /* Currently in progress */
  256. residue -= dma_chan_get_sent(chan);
  257. } else {
  258. residue -= first->len;
  259. list_for_each_entry(desc, &first->xfer_list, xfer_list) {
  260. if (llp == DWC_LLP_LOC(desc->lli.llp)) {
  261. /* Currently in progress */
  262. residue -= dma_chan_get_sent(chan);
  263. break;
  264. } else {
  265. residue -= desc->len;
  266. }
  267. }
  268. }
  269. } else {
  270. residue = 0;
  271. }
  273. spin_unlock_irqrestore(&chan->vc.lock, flags);
  274. return residue;
  275. }
  276. static enum dma_status
  277. dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
  278. struct dma_tx_state *txstate)
  279. {
  280. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  281. enum dma_status ret;
  282. ret = dma_cookie_status(dchan, cookie, txstate);
  283. if (ret == DMA_COMPLETE)
  284. return ret;
  285. dma_set_residue(txstate, dma_chan_get_residue(chan, cookie));
  286. if (chan->is_paused && ret == DMA_IN_PROGRESS)
  287. ret = DMA_PAUSED;
  288. return ret;
  289. }
  290. static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
  291. {
  292. desc->lli.llp = cpu_to_le64(adr);
  293. }
  294. static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
  295. {
  296. axi_chan_iowrite64(chan, CH_LLP, adr);
  297. }
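/*
 * Program the channel for the transfer described by @first: point
 * CH_LLP at the first hardware LLI, select linked-list multi-block
 * mode and hardware handshaking in CH_CFG_L/CH_CFG_H, unmask the
 * transfer-done and error interrupts (plus block-done for cyclic
 * transfers) and enable the channel. The channel must be idle.
 */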
  298. /* Called in chan locked context */
  299. static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
  300. struct axi_dma_desc *first)
  301. {
  302. struct dma_slave_config *sconfig = &chan->dma_sconfig;
  303. u32 priority = chan->chip->dw->hdata->priority[chan->id];
  304. u32 reg, irq_mask;
  305. u8 lms = 0; /* Select AXI0 master for LLI fetching */
  306. if (unlikely(axi_chan_is_hw_enable(chan))) {
  307. dev_err(chan2dev(chan), "%s is non-idle!\n",
  308. axi_chan_name(chan));
  309. return;
  310. }
  311. axi_dma_enable(chan->chip);
  312. reg = (chan->dws.dst_id << CH_CFG_L_DST_PER_POS |
  313. chan->dws.src_id << CH_CFG_L_SRC_PER_POS |
  314. DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
  315. DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
  316. axi_chan_iowrite32(chan, CH_CFG_L, reg);
  317. reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
  318. priority << CH_CFG_H_PRIORITY_POS |
  319. DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
  320. DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
  321. if (sconfig->direction == DMA_MEM_TO_DEV)
  322. reg |= sconfig->device_fc ? DWC_CFGH_FC(DWAXIDMAC_TT_FC_MEM_TO_PER_DST) :
  323. DWC_CFGH_FC(DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC);
  324. else if (sconfig->direction == DMA_DEV_TO_MEM)
  325. reg |= sconfig->device_fc ? DWC_CFGH_FC(DWAXIDMAC_TT_FC_PER_TO_MEM_SRC) :
  326. DWC_CFGH_FC(DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC);
  327. axi_chan_iowrite32(chan, CH_CFG_H, reg);
  328. write_chan_llp(chan, first->vd.tx.phys | lms);
  329. irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
  330. if (chan->cyclic)
  331. irq_mask |= DWAXIDMAC_IRQ_BLOCK_TRF;
  332. axi_chan_irq_sig_set(chan, irq_mask);
  333. /* Generate 'suspend' status but don't generate interrupt */
  334. irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
  335. axi_chan_irq_set(chan, irq_mask);
  336. axi_chan_enable(chan);
  337. }
  338. static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
  339. {
  340. struct axi_dma_desc *desc;
  341. struct virt_dma_desc *vd;
  342. vd = vchan_next_desc(&chan->vc);
  343. if (!vd)
  344. return;
  345. desc = vd_to_axi_desc(vd);
  346. dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
  347. vd->tx.cookie);
  348. axi_chan_block_xfer_start(chan, desc);
  349. }
  350. static void dma_chan_issue_pending(struct dma_chan *dchan)
  351. {
  352. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  353. unsigned long flags;
  354. spin_lock_irqsave(&chan->vc.lock, flags);
  355. if (vchan_issue_pending(&chan->vc))
  356. axi_chan_start_first_queued(chan);
  357. spin_unlock_irqrestore(&chan->vc.lock, flags);
  358. }
  359. static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
  360. {
  361. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  362. /* ASSERT: channel is idle */
  363. if (axi_chan_is_hw_enable(chan)) {
  364. dev_err(chan2dev(chan), "%s is non-idle!\n",
  365. axi_chan_name(chan));
  366. return -EBUSY;
  367. }
  368. dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
  369. pm_runtime_get(chan->chip->dev);
  370. return 0;
  371. }
  372. static void dma_chan_free_chan_resources(struct dma_chan *dchan)
  373. {
  374. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  375. /* ASSERT: channel is idle */
  376. if (axi_chan_is_hw_enable(chan))
  377. dev_err(dchan2dev(dchan), "%s is non-idle!\n",
  378. axi_chan_name(chan));
  379. axi_chan_disable(chan);
  380. axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
  381. vchan_free_chan_resources(&chan->vc);
  382. dev_vdbg(dchan2dev(dchan),
  383. "%s: free resources, descriptor still allocated: %u\n",
  384. axi_chan_name(chan), atomic_read(&chan->descs_allocated));
  385. pm_runtime_put(chan->chip->dev);
  386. }
  387. /*
  388. * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
  389. * as 1, it understands that the current block is the final block in the
  390. * transfer and completes the DMA transfer operation at the end of the
  391. * current block transfer.
  392. */
  393. static void set_desc_last(struct axi_dma_desc *desc)
  394. {
  395. u32 val;
  396. write_desc_llp(desc, 0);
  397. val = le32_to_cpu(desc->lli.ctl_hi);
  398. val |= CH_CTL_H_LLI_LAST;
  399. desc->lli.ctl_hi = cpu_to_le32(val);
  400. }
  401. static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
  402. {
  403. desc->lli.sar = cpu_to_le64(adr);
  404. }
  405. static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
  406. {
  407. desc->lli.dar = cpu_to_le64(adr);
  408. }
  409. static void set_desc_src_master(struct axi_dma_desc *desc)
  410. {
  411. u32 val;
  412. /* Select AXI0 for source master */
  413. val = le32_to_cpu(desc->lli.ctl_lo);
  414. val &= ~CH_CTL_L_SRC_MAST;
  415. desc->lli.ctl_lo = cpu_to_le32(val);
  416. }
  417. static void set_desc_dest_master(struct axi_dma_desc *desc)
  418. {
  419. u32 val;
  420. /* Select AXI1 for destination master if available */
  421. val = le32_to_cpu(desc->lli.ctl_lo);
  422. if (desc->chan->chip->dw->hdata->nr_masters > 1)
  423. val |= CH_CTL_L_DST_MAST;
  424. else
  425. val &= ~CH_CTL_L_DST_MAST;
  426. desc->lli.ctl_lo = cpu_to_le32(val);
  427. }
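/*
 * Prepare a memory-to-memory transfer: the requested length is split
 * into a chain of LLIs, each moving at most max_block_ts data items of
 * the widest width the source/destination alignment allows.
 */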
  428. static struct dma_async_tx_descriptor *
  429. dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
  430. dma_addr_t src_adr, size_t len, unsigned long flags)
  431. {
  432. struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
  433. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  434. size_t block_ts, max_block_ts, xfer_len, total_len = len;
  435. u32 xfer_width, reg;
  436. u8 lms = 0; /* Select AXI0 master for LLI fetching */
  437. dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
  438. axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
  439. max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
  440. while (len) {
  441. xfer_len = len;
  442. /*
  443. * Take care of the alignment.
  444. * Source and destination widths can actually differ, but we
  445. * keep them the same to simplify the setup.
  446. */
  447. xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);
  448. /*
  449. * block_ts is the number of data items of the chosen width to be
  450. * transferred in one DMA block transfer.
  451. * The BLOCK_TS register must be programmed with block_ts - 1.
  452. */
  453. block_ts = xfer_len >> xfer_width;
  454. if (block_ts > max_block_ts) {
  455. block_ts = max_block_ts;
  456. xfer_len = max_block_ts << xfer_width;
  457. }
  458. desc = axi_desc_get(chan);
  459. if (unlikely(!desc))
  460. goto err_desc_get;
  461. write_desc_sar(desc, src_adr);
  462. write_desc_dar(desc, dst_adr);
  463. desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
  464. reg = CH_CTL_H_LLI_VALID;
  465. if (chan->chip->dw->hdata->restrict_axi_burst_len) {
  466. u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
  467. reg |= (CH_CTL_H_ARLEN_EN |
  468. burst_len << CH_CTL_H_ARLEN_POS |
  469. CH_CTL_H_AWLEN_EN |
  470. burst_len << CH_CTL_H_AWLEN_POS);
  471. }
  472. desc->lli.ctl_hi = cpu_to_le32(reg);
  473. reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
  474. DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
  475. xfer_width << CH_CTL_L_DST_WIDTH_POS |
  476. xfer_width << CH_CTL_L_SRC_WIDTH_POS |
  477. DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
  478. DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
  479. desc->lli.ctl_lo = cpu_to_le32(reg);
  480. set_desc_src_master(desc);
  481. set_desc_dest_master(desc);
  482. desc->len = xfer_len;
  483. /* Manage transfer list (xfer_list) */
  484. if (!first) {
  485. first = desc;
  486. } else {
  487. list_add_tail(&desc->xfer_list, &first->xfer_list);
  488. write_desc_llp(prev, desc->vd.tx.phys | lms);
  489. }
  490. prev = desc;
  491. /* update the length and addresses for the next loop cycle */
  492. len -= xfer_len;
  493. dst_adr += xfer_len;
  494. src_adr += xfer_len;
  495. }
  496. /* Total len of src/dest sg == 0, so no descriptors were allocated */
  497. if (unlikely(!first))
  498. return NULL;
  499. /* Set end-of-link on the last link descriptor of the list */
  500. set_desc_last(desc);
  501. first->total_len = total_len;
  502. return vchan_tx_prep(&chan->vc, &first->vd, flags);
  503. err_desc_get:
  504. if (first)
  505. axi_desc_put(first);
  506. return NULL;
  507. }
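/*
 * Prepare a slave scatter-gather transfer: every scatterlist entry is
 * split into one or more LLIs so that no block exceeds max_block_ts
 * data items of the peripheral register width. The peripheral address
 * stays fixed while the memory address increments.
 */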
  508. static struct dma_async_tx_descriptor *
  509. dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
  510. unsigned int sg_len, enum dma_transfer_direction direction,
  511. unsigned long flags, void *context)
  512. {
  513. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  514. struct dma_slave_config *sconfig = &chan->dma_sconfig;
  515. struct axi_dma_desc *prev;
  516. struct axi_dma_desc *first;
  517. struct axi_dma_desc *desc;
  518. u32 ctllo, ctlhi;
  519. u8 m_master = chan->dws.m_master;
  520. u8 lms = DWC_LLP_LMS(m_master);
  521. dma_addr_t reg;
  522. unsigned int reg_width;
  523. unsigned int i;
  524. struct scatterlist *sg;
  525. size_t total_len = 0;
  526. size_t max_block_ts;
  527. dev_vdbg(chan2dev(chan), "%s\n", __func__);
  528. if (unlikely(!is_slave_direction(direction) || !sg_len))
  529. return NULL;
  530. max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
  531. chan->direction = direction;
  532. prev = first = NULL;
  533. ctlhi = CH_CTL_H_LLI_VALID;
  534. if (chan->chip->dw->hdata->restrict_axi_burst_len) {
  535. u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
  536. ctlhi |= (CH_CTL_H_ARLEN_EN |
  537. burst_len << CH_CTL_H_ARLEN_POS |
  538. CH_CTL_H_AWLEN_EN |
  539. burst_len << CH_CTL_H_AWLEN_POS);
  540. }
  541. switch (direction) {
  542. case DMA_MEM_TO_DEV:
  543. reg_width = __ffs(sconfig->dst_addr_width);
  544. reg = sconfig->dst_addr;
  545. ctllo = (DWC_DEFAULT_CTLLO(dchan)
  546. | DWC_CTLL_DST_WIDTH(reg_width)
  547. | DWC_CTLL_DST_FIX
  548. | DWC_CTLL_SRC_INC);
  549. for_each_sg(sgl, sg, sg_len, i) {
  550. u32 len, dlen, mem;
  551. mem = sg_dma_address(sg);
  552. len = sg_dma_len(sg);
  553. slave_sg_todev_fill_desc:
  554. desc = axi_desc_get(chan);
  555. if (!desc)
  556. goto err_desc_get;
  557. write_desc_sar(desc, mem);
  558. write_desc_dar(desc, reg);
  559. desc->lli.ctl_hi = cpu_to_le32(ctlhi);
  560. desc->lli.ctl_lo = cpu_to_le32(ctllo | DWC_CTLL_SRC_WIDTH(reg_width));
  561. if ((len >> reg_width) > max_block_ts) {
  562. dlen = max_block_ts << reg_width;
  563. mem += dlen;
  564. len -= dlen;
  565. } else {
  566. dlen = len;
  567. len = 0;
  568. }
  569. desc->lli.block_ts_lo = cpu_to_le32((dlen >> reg_width) - 1);
  570. desc->len = dlen;
  571. if (!first) {
  572. first = desc;
  573. } else {
  574. list_add_tail(&desc->xfer_list, &first->xfer_list);
  575. write_desc_llp(prev, desc->vd.tx.phys | lms);
  576. }
  577. prev = desc;
  578. total_len += dlen;
  579. if (len)
  580. goto slave_sg_todev_fill_desc;
  581. }
  582. break;
  583. case DMA_DEV_TO_MEM:
  584. reg_width = __ffs(sconfig->src_addr_width);
  585. reg = sconfig->src_addr;
  586. ctllo = (DWC_DEFAULT_CTLLO(dchan)
  587. | DWC_CTLL_SRC_WIDTH(reg_width)
  588. | DWC_CTLL_DST_INC
  589. | DWC_CTLL_SRC_FIX);
  590. for_each_sg(sgl, sg, sg_len, i) {
  591. u32 len, dlen, mem;
  592. mem = sg_dma_address(sg);
  593. len = sg_dma_len(sg);
  594. slave_sg_fromdev_fill_desc:
  595. desc = axi_desc_get(chan);
  596. if (!desc)
  597. goto err_desc_get;
  598. write_desc_sar(desc, reg);
  599. write_desc_dar(desc, mem);
  600. desc->lli.ctl_hi = cpu_to_le32(ctlhi);
  601. desc->lli.ctl_lo = cpu_to_le32(ctllo | DWC_CTLL_DST_WIDTH(reg_width));
  602. if ((len >> reg_width) > max_block_ts) {
  603. dlen = max_block_ts << reg_width;
  604. mem += dlen;
  605. len -= dlen;
  606. } else {
  607. dlen = len;
  608. len = 0;
  609. }
  610. desc->lli.block_ts_lo = cpu_to_le32((dlen >> reg_width) - 1);
  611. desc->len = dlen;
  612. if (!first) {
  613. first = desc;
  614. } else {
  615. list_add_tail(&desc->xfer_list, &first->xfer_list);
  616. write_desc_llp(prev, desc->vd.tx.phys | lms);
  617. }
  618. prev = desc;
  619. total_len += dlen;
  620. if (len)
  621. goto slave_sg_fromdev_fill_desc;
  622. }
  623. break;
  624. default:
  625. return NULL;
  626. }
  627. /* Total len of src/dest sg == 0, so no descriptors were allocated */
  628. if (unlikely(!first))
  629. return NULL;
  630. /* Set end-of-link on the last link descriptor of the list */
  631. set_desc_last(desc);
  632. first->total_len = total_len;
  633. return vchan_tx_prep(&chan->vc, &first->vd, flags);
  634. err_desc_get:
  635. dev_err(chan2dev(chan),
  636. "not enough descriptors available. Direction %d\n", direction);
  637. axi_desc_put(first);
  638. return NULL;
  639. }
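/*
 * Prepare a cyclic (e.g. audio) transfer: one LLI per period, linked
 * into a ring by pointing the last LLI back at the first. Block-done
 * interrupts are enabled so the period callback runs while the ring
 * keeps going.
 */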
  640. static struct dma_async_tx_descriptor *dma_chan_prep_dma_cyclic(
  641. struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
  642. size_t period_len, enum dma_transfer_direction direction,
  643. unsigned long flags)
  644. {
  645. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  646. struct dma_slave_config *sconfig = &chan->dma_sconfig;
  647. struct axi_dma_desc *prev;
  648. struct axi_dma_desc *first;
  649. struct axi_dma_desc *desc;
  650. u8 m_master = chan->dws.m_master;
  651. u8 lms = DWC_LLP_LMS(m_master);
  652. unsigned int reg_width;
  653. size_t max_block_ts;
  654. u32 ctlhi;
  655. unsigned int periods;
  656. unsigned int i;
  657. chan->direction = direction;
  658. max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
  659. if (direction == DMA_MEM_TO_DEV)
  660. reg_width = __ffs(sconfig->dst_addr_width);
  661. else
  662. reg_width = __ffs(sconfig->src_addr_width);
  663. periods = len / period_len;
  664. /* Check for too big/unaligned periods and unaligned DMA buffer. */
  665. if (period_len > (max_block_ts << reg_width))
  666. goto out_err;
  667. if (unlikely(period_len & ((1 << reg_width) - 1)))
  668. goto out_err;
  669. if (unlikely(buf_addr & ((1 << reg_width) - 1)))
  670. goto out_err;
  671. prev = first = NULL;
  672. ctlhi = CH_CTL_H_LLI_VALID | CH_CTL_H_IOC_BLKTFR_EN;
  673. if (chan->chip->dw->hdata->restrict_axi_burst_len) {
  674. u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
  675. ctlhi |= (CH_CTL_H_ARLEN_EN |
  676. burst_len << CH_CTL_H_ARLEN_POS |
  677. CH_CTL_H_AWLEN_EN |
  678. burst_len << CH_CTL_H_AWLEN_POS);
  679. }
  680. for (i = 0; i < periods; i++) {
  681. desc = axi_desc_get(chan);
  682. if (!desc)
  683. goto out_err_desc_get;
  684. switch (direction) {
  685. case DMA_MEM_TO_DEV:
  686. write_desc_sar(desc, buf_addr + period_len * i);
  687. write_desc_dar(desc, sconfig->dst_addr);
  688. desc->lli.ctl_lo = (DWC_DEFAULT_CTLLO(dchan)
  689. | DWC_CTLL_DST_WIDTH(reg_width)
  690. | DWC_CTLL_SRC_WIDTH(reg_width)
  691. | DWC_CTLL_DST_FIX
  692. | DWC_CTLL_SRC_INC);
  693. desc->lli.ctl_hi = cpu_to_le32(ctlhi);
  694. desc->lli.block_ts_lo = cpu_to_le32((period_len >> reg_width) - 1);
  695. break;
  696. case DMA_DEV_TO_MEM:
  697. write_desc_sar(desc, sconfig->src_addr);
  698. write_desc_dar(desc, buf_addr + period_len * i);
  699. desc->lli.ctl_lo = (DWC_DEFAULT_CTLLO(dchan)
  700. | DWC_CTLL_SRC_WIDTH(reg_width)
  701. | DWC_CTLL_DST_WIDTH(reg_width)
  702. | DWC_CTLL_DST_INC
  703. | DWC_CTLL_SRC_FIX);
  704. desc->lli.ctl_hi = cpu_to_le32(ctlhi);
  705. desc->lli.block_ts_lo = cpu_to_le32((period_len >> reg_width) - 1);
  706. break;
  707. default:
  708. break;
  709. }
  710. desc->len = period_len;
  711. if (!first) {
  712. first = desc;
  713. } else {
  714. list_add_tail(&desc->xfer_list, &first->xfer_list);
  715. write_desc_llp(prev, desc->vd.tx.phys | lms);
  716. }
  717. prev = desc;
  718. }
  719. first->total_len = len;
  720. /* Let's make a cyclic list */
  721. write_desc_llp(prev, first->vd.tx.phys | lms);
  722. dev_dbg(chan2dev(chan),
  723. "cyclic prepared buf %pad len %zu period %zu periods %d\n",
  724. &buf_addr, len, period_len, periods);
  725. chan->cyclic = true;
  726. return vchan_tx_prep(&chan->vc, &first->vd, flags);
  727. out_err_desc_get:
  728. axi_desc_put(first);
  729. out_err:
  730. return NULL;
  731. }
  732. bool dw_axi_dma_filter(struct dma_chan *dchan, void *param)
  733. {
  734. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  735. struct dw_axi_dma_slave *dws = param;
  736. if (dws->dma_dev != dchan->device->dev)
  737. return false;
  738. /* We have to copy data since dws can be temporary storage */
  739. memcpy(&chan->dws, dws, sizeof(struct dw_axi_dma_slave));
  740. return true;
  741. }
  742. EXPORT_SYMBOL_GPL(dw_axi_dma_filter);
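/*
 * Illustrative only - a client not using the DT binding could request a
 * channel through this filter with a caller-provided dw_axi_dma_slave
 * (the field values below are made up):
 *
 *	struct dw_axi_dma_slave dws = {
 *		.dma_dev  = dmac_dev,
 *		.src_id   = 4,
 *		.dst_id   = 4,
 *		.m_master = 0,
 *		.p_master = 1,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_axi_dma_filter, &dws);
 */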
  743. static void axi_chan_dump_lli(struct axi_dma_chan *chan,
  744. struct axi_dma_desc *desc)
  745. {
  746. dev_err(dchan2dev(&chan->vc.chan),
  747. "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
  748. le64_to_cpu(desc->lli.sar),
  749. le64_to_cpu(desc->lli.dar),
  750. le64_to_cpu(desc->lli.llp),
  751. le32_to_cpu(desc->lli.block_ts_lo),
  752. le32_to_cpu(desc->lli.ctl_hi),
  753. le32_to_cpu(desc->lli.ctl_lo));
  754. }
  755. static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
  756. struct axi_dma_desc *desc_head)
  757. {
  758. struct axi_dma_desc *desc;
  759. axi_chan_dump_lli(chan, desc_head);
  760. list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
  761. axi_chan_dump_lli(chan, desc);
  762. }
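/*
 * Error interrupt handling: stop the channel, drop the bad descriptor
 * from the issued list, dump its LLI chain for debugging, complete its
 * cookie and try to restart with the next queued descriptor.
 */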
  763. static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
  764. {
  765. struct virt_dma_desc *vd;
  766. unsigned long flags;
  767. spin_lock_irqsave(&chan->vc.lock, flags);
  768. axi_chan_disable(chan);
  769. /* The bad descriptor is currently at the head of the vc list */
  770. vd = vchan_next_desc(&chan->vc);
  771. /* Remove the bad descriptor from the issued list */
  772. list_del(&vd->node);
  773. /* WARN about bad descriptor */
  774. dev_err(chan2dev(chan),
  775. "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
  776. axi_chan_name(chan), vd->tx.cookie, status);
  777. axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
  778. vchan_cookie_complete(vd);
  779. /* Try to restart the controller */
  780. axi_chan_start_first_queued(chan);
  781. spin_unlock_irqrestore(&chan->vc.lock, flags);
  782. }
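/*
 * Transfer/block completion. For cyclic transfers, each block interrupt
 * locates the LLI that has just finished (the one preceding the LLI the
 * channel is currently fetching, according to CH_LLP), marks it
 * LLI_VALID again so the ring can be walked on the next lap, and fires
 * the period callback. For non-cyclic transfers the completed
 * descriptor is retired and the next queued one is started.
 */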
  783. static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
  784. {
  785. struct virt_dma_desc *vd;
  786. unsigned long flags;
  787. spin_lock_irqsave(&chan->vc.lock, flags);
  788. /* The completed descriptor is currently at the head of the vc list */
  789. vd = vchan_next_desc(&chan->vc);
  790. if (chan->cyclic) {
  791. struct axi_dma_desc *first, *desc, *cur_desc;
  792. u32 llplo, llphi;
  793. u64 llp;
  794. first = vd_to_axi_desc(vd);
  795. llplo = axi_chan_ioread32(chan, CH_LLP);
  796. llphi = axi_chan_ioread32(chan, CH_LLP + 4);
  797. llp = ((u64)llphi << 32) | llplo;
  799. if (llp == DWC_LLP_LOC(first->lli.llp)) {
  800. cur_desc = list_prev_entry(first, xfer_list);
  801. cur_desc->lli.ctl_hi |= CH_CTL_H_LLI_VALID;
  802. } else {
  803. list_for_each_entry(desc, &first->xfer_list, xfer_list) {
  805. if (llp == DWC_LLP_LOC(desc->lli.llp)) {
  806. cur_desc = list_prev_entry(desc, xfer_list);
  807. cur_desc->lli.ctl_hi |= CH_CTL_H_LLI_VALID;
  808. break;
  809. }
  810. }
  811. }
  812. vchan_cyclic_callback(vd);
  813. } else {
  814. if (unlikely(axi_chan_is_hw_enable(chan))) {
  815. dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
  816. axi_chan_name(chan));
  817. axi_chan_disable(chan);
  818. }
  819. /* Remove the completed descriptor from issued list before completing */
  820. list_del(&vd->node);
  821. vchan_cookie_complete(vd);
  822. /* Submit queued descriptors after processing the completed ones */
  823. axi_chan_start_first_queued(chan);
  824. }
  825. spin_unlock_irqrestore(&chan->vc.lock, flags);
  826. }
  827. static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
  828. {
  829. struct axi_dma_chip *chip = dev_id;
  830. struct dw_axi_dma *dw = chip->dw;
  831. struct axi_dma_chan *chan;
  832. u32 status, i;
  833. /* Disable DMAC interrupts. We'll enable them after processing the channels */
  834. axi_dma_irq_disable(chip);
  835. /* Poll, clear and process every channel's interrupt status */
  836. for (i = 0; i < dw->hdata->nr_channels; i++) {
  837. chan = &dw->chan[i];
  838. status = axi_chan_irq_read(chan);
  839. axi_chan_irq_clear(chan, status);
  841. dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
  842. axi_chan_name(chan), i, status);
  843. if (status & DWAXIDMAC_IRQ_ALL_ERR)
  844. axi_chan_handle_err(chan, status);
  845. else if (status & (DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_BLOCK_TRF))
  846. axi_chan_block_xfer_complete(chan);
  847. }
  848. /* Re-enable interrupts */
  849. axi_dma_irq_enable(chip);
  850. return IRQ_HANDLED;
  851. }
  852. /*
  853. * Fix sconfig's burst size according to the DW AXI DMAC encoding.
  854. * We need to convert them as: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
  855. *
  856. * NOTE: burst size 2 is not supported by the controller.
  857. *
  858. * This is done by finding the most significant bit set: fls(n) - 2.
  859. */
  860. static inline void convert_burst(u32 *maxburst)
  861. {
  862. if (*maxburst > 1)
  863. *maxburst = fls(*maxburst) - 2;
  864. else
  865. *maxburst = 0;
  866. }
  867. static int dma_chan_config(struct dma_chan *dchan, struct dma_slave_config *sconfig)
  868. {
  869. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  870. /* Check if chan will be configured for slave transfers */
  871. if (!is_slave_direction(sconfig->direction))
  872. return -EINVAL;
  873. memcpy(&chan->dma_sconfig, sconfig, sizeof(*sconfig));
  874. chan->direction = sconfig->direction;
  875. if (chan->dma_sconfig.src_maxburst == 0)
  876. chan->dma_sconfig.src_maxburst = chan->dma_sconfig.dst_maxburst;
  877. if (chan->dma_sconfig.dst_maxburst == 0)
  878. chan->dma_sconfig.dst_maxburst = chan->dma_sconfig.src_maxburst;
  879. convert_burst(&chan->dma_sconfig.src_maxburst);
  880. convert_burst(&chan->dma_sconfig.dst_maxburst);
  881. return 0;
  882. }
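/*
 * Abort whatever is running on the channel: clear its enable bit in
 * DMAC_CHEN, poll (for up to 10 ms) until the hardware reports the
 * channel disabled, then free all queued virtual descriptors.
 */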
  883. static int dma_chan_terminate_all(struct dma_chan *dchan)
  884. {
  885. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  886. u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
  887. unsigned long flags;
  888. u32 val;
  889. int ret;
  890. LIST_HEAD(head);
  891. axi_chan_disable(chan);
  892. ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
  893. !(val & chan_active), 1000, 10000);
  894. if (ret == -ETIMEDOUT)
  895. dev_warn(dchan2dev(dchan),
  896. "%s failed to stop\n", axi_chan_name(chan));
  897. spin_lock_irqsave(&chan->vc.lock, flags);
  898. vchan_get_all_descriptors(&chan->vc, &head);
  899. chan->cyclic = false;
  900. spin_unlock_irqrestore(&chan->vc.lock, flags);
  901. vchan_dma_desc_free_list(&chan->vc, &head);
  902. dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
  903. return 0;
  904. }
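/*
 * Pause the channel by setting its SUSP (plus write-enable) bit in
 * DMAC_CHEN, then poll the channel interrupt status for the SUSPENDED
 * flag for at most 20 iterations of 2 us before giving up with -EAGAIN.
 */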
  905. static int dma_chan_pause(struct dma_chan *dchan)
  906. {
  907. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  908. unsigned long flags;
  909. unsigned int timeout = 20; /* timeout iterations */
  910. u32 val;
  911. spin_lock_irqsave(&chan->vc.lock, flags);
  912. val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
  913. val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
  914. BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
  915. axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
  916. do {
  917. if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
  918. break;
  919. udelay(2);
  920. } while (--timeout);
  921. axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
  922. chan->is_paused = true;
  923. spin_unlock_irqrestore(&chan->vc.lock, flags);
  924. return timeout ? 0 : -EAGAIN;
  925. }
  926. /* Called in chan locked context */
  927. static inline void axi_chan_resume(struct axi_dma_chan *chan)
  928. {
  929. u32 val;
  930. val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
  931. val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
  932. val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
  933. axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
  934. chan->is_paused = false;
  935. }
  936. static int dma_chan_resume(struct dma_chan *dchan)
  937. {
  938. struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
  939. unsigned long flags;
  940. spin_lock_irqsave(&chan->vc.lock, flags);
  941. if (chan->is_paused)
  942. axi_chan_resume(chan);
  943. spin_unlock_irqrestore(&chan->vc.lock, flags);
  944. return 0;
  945. }
  946. static int axi_dma_suspend(struct axi_dma_chip *chip)
  947. {
  948. axi_dma_irq_disable(chip);
  949. axi_dma_disable(chip);
  950. clk_disable_unprepare(chip->core_clk);
  951. clk_disable_unprepare(chip->cfgr_clk);
  952. return 0;
  953. }
  954. static int axi_dma_resume(struct axi_dma_chip *chip)
  955. {
  956. int ret;
  957. ret = clk_prepare_enable(chip->cfgr_clk);
  958. if (ret < 0)
  959. return ret;
  960. ret = clk_prepare_enable(chip->core_clk);
  961. if (ret < 0)
  962. return ret;
  963. axi_dma_enable(chip);
  964. axi_dma_irq_enable(chip);
  965. return 0;
  966. }
  967. static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
  968. {
  969. struct axi_dma_chip *chip = dev_get_drvdata(dev);
  970. return axi_dma_suspend(chip);
  971. }
  972. static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
  973. {
  974. struct axi_dma_chip *chip = dev_get_drvdata(dev);
  975. return axi_dma_resume(chip);
  976. }
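/*
 * Read the controller configuration from firmware properties:
 * "dma-channels", "snps,dma-masters", "snps,data-width", the
 * per-channel "snps,block-size" and "snps,priority" arrays and the
 * optional "snps,axi-max-burst-len" limit.
 */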
  977. static int parse_device_properties(struct axi_dma_chip *chip)
  978. {
  979. struct device *dev = chip->dev;
  980. u32 tmp, carr[DMAC_MAX_CHANNELS];
  981. int ret;
  982. ret = device_property_read_u32(dev, "dma-channels", &tmp);
  983. if (ret)
  984. return ret;
  985. if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
  986. return -EINVAL;
  987. chip->dw->hdata->nr_channels = tmp;
  988. ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
  989. if (ret)
  990. return ret;
  991. if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
  992. return -EINVAL;
  993. chip->dw->hdata->nr_masters = tmp;
  994. ret = device_property_read_u32(dev, "snps,data-width", &tmp);
  995. if (ret)
  996. return ret;
  997. if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
  998. return -EINVAL;
  999. chip->dw->hdata->m_data_width = tmp;
  1000. ret = device_property_read_u32_array(dev, "snps,block-size", carr,
  1001. chip->dw->hdata->nr_channels);
  1002. if (ret)
  1003. return ret;
  1004. for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
  1005. if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
  1006. return -EINVAL;
  1007. chip->dw->hdata->block_size[tmp] = carr[tmp];
  1008. }
  1009. ret = device_property_read_u32_array(dev, "snps,priority", carr,
  1010. chip->dw->hdata->nr_channels);
  1011. if (ret)
  1012. return ret;
  1013. /* Priority values must be programmed within the [0:nr_channels-1] range */
  1014. for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
  1015. if (carr[tmp] >= chip->dw->hdata->nr_channels)
  1016. return -EINVAL;
  1017. chip->dw->hdata->priority[tmp] = carr[tmp];
  1018. }
  1019. /* axi-max-burst-len is an optional property */
  1020. ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
  1021. if (!ret) {
  1022. if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
  1023. return -EINVAL;
  1024. if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
  1025. return -EINVAL;
  1026. chip->dw->hdata->restrict_axi_burst_len = true;
  1027. chip->dw->hdata->axi_rw_burst_len = tmp - 1;
  1028. }
  1029. return 0;
  1030. }
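/*
 * #dma-cells is 3 for this controller: <request-line m_master p_master>.
 * A hypothetical client node could therefore look like (the values are
 * made up):
 *
 *	uart0: serial@0 {
 *		...
 *		dmas = <&axi_dma 4 0 1>, <&axi_dma 5 0 1>;
 *		dma-names = "tx", "rx";
 *	};
 */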
  1031. static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
  1032. struct of_dma *ofdma)
  1033. {
  1034. struct dw_axi_dma *dw = ofdma->of_dma_data;
  1035. struct dw_axi_dma_slave slave = {
  1036. .dma_dev = dw->dma.dev,
  1037. };
  1038. dma_cap_mask_t cap;
  1039. if (dma_spec->args_count != 3)
  1040. return NULL;
  1041. slave.src_id = dma_spec->args[0];
  1042. slave.dst_id = dma_spec->args[0];
  1043. slave.m_master = dma_spec->args[1];
  1044. slave.p_master = dma_spec->args[2];
  1045. if (WARN_ON(slave.src_id >= DMAC_MAX_NR_REQUESTS ||
  1046. slave.dst_id >= DMAC_MAX_NR_REQUESTS ||
  1047. slave.m_master >= dw->hdata->nr_masters ||
  1048. slave.p_master >= dw->hdata->nr_masters))
  1049. return NULL;
  1050. dma_cap_zero(cap);
  1051. dma_cap_set(DMA_SLAVE, cap);
  1052. /* TODO: there should be a simpler way to do this */
  1053. return dma_request_channel(cap, dw_axi_dma_filter, &slave);
  1054. }
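/*
 * Probe: map the registers, get the "core-clk" and "cfgr-clk" clocks,
 * parse the firmware properties, set up the per-channel state and the
 * LLI dma_pool, register the dmaengine device and, when booting from a
 * device tree, the of_dma translation hook.
 */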
  1055. static int dw_probe(struct platform_device *pdev)
  1056. {
  1057. struct axi_dma_chip *chip;
  1058. struct resource *mem;
  1059. struct dw_axi_dma *dw;
  1060. struct dw_axi_dma_hcfg *hdata;
  1061. u32 i;
  1062. int ret;
  1063. chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
  1064. if (!chip)
  1065. return -ENOMEM;
  1066. dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
  1067. if (!dw)
  1068. return -ENOMEM;
  1069. hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
  1070. if (!hdata)
  1071. return -ENOMEM;
  1072. chip->dw = dw;
  1073. chip->dev = &pdev->dev;
  1074. chip->dw->hdata = hdata;
  1075. chip->irq = platform_get_irq(pdev, 0);
  1076. if (chip->irq < 0)
  1077. return chip->irq;
  1078. mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1079. chip->regs = devm_ioremap_resource(chip->dev, mem);
  1080. if (IS_ERR(chip->regs))
  1081. return PTR_ERR(chip->regs);
  1082. chip->core_clk = devm_clk_get(chip->dev, "core-clk");
  1083. if (IS_ERR(chip->core_clk))
  1084. return PTR_ERR(chip->core_clk);
  1085. chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
  1086. if (IS_ERR(chip->cfgr_clk))
  1087. return PTR_ERR(chip->cfgr_clk);
  1088. ret = parse_device_properties(chip);
  1089. if (ret)
  1090. return ret;
  1091. dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
  1092. sizeof(*dw->chan), GFP_KERNEL);
  1093. if (!dw->chan)
  1094. return -ENOMEM;
  1095. ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
  1096. IRQF_SHARED, KBUILD_MODNAME, chip);
  1097. if (ret)
  1098. return ret;
  1099. /* LLI address must be aligned to a 64-byte boundary */
  1100. dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
  1101. sizeof(struct axi_dma_desc), 64, 0);
  1102. if (!dw->desc_pool) {
  1103. dev_err(chip->dev, "No memory for descriptors dma pool\n");
  1104. return -ENOMEM;
  1105. }
  1106. INIT_LIST_HEAD(&dw->dma.channels);
  1107. for (i = 0; i < hdata->nr_channels; i++) {
  1108. struct axi_dma_chan *chan = &dw->chan[i];
  1109. chan->chip = chip;
  1110. chan->id = i;
  1111. chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
  1112. atomic_set(&chan->descs_allocated, 0);
  1113. chan->vc.desc_free = vchan_desc_put;
  1114. vchan_init(&chan->vc, &dw->dma);
  1115. }
  1116. /* Set capabilities */
  1117. dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
  1118. dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
  1119. /* DMA capabilities */
  1120. dw->dma.chancnt = hdata->nr_channels;
  1121. dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
  1122. dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
  1123. dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
  1124. BIT(DMA_MEM_TO_MEM);
  1125. dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
  1126. dw->dma.dev = chip->dev;
  1127. dw->dma.device_config = dma_chan_config;
  1128. dw->dma.device_tx_status = dma_chan_tx_status;
  1129. dw->dma.device_issue_pending = dma_chan_issue_pending;
  1130. dw->dma.device_terminate_all = dma_chan_terminate_all;
  1131. dw->dma.device_pause = dma_chan_pause;
  1132. dw->dma.device_resume = dma_chan_resume;
  1133. dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
  1134. dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
  1135. dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
  1136. dw->dma.device_prep_slave_sg = dma_chan_prep_slave_sg;
  1137. dw->dma.device_prep_dma_cyclic = dma_chan_prep_dma_cyclic;
  1138. platform_set_drvdata(pdev, chip);
  1139. if (pdev->dev.of_node) {
  1140. ret = of_dma_controller_register(pdev->dev.of_node,
  1141. dw_axi_dma_of_xlate, dw);
  1142. if (ret)
  1143. dev_err(&pdev->dev,
  1144. "could not register of_dma_controller\n");
  1145. }
  1146. pm_runtime_enable(chip->dev);
  1147. /*
  1148. * We can't just call pm_runtime_get here instead of
  1149. * pm_runtime_get_noresume + axi_dma_resume because we need the
  1150. * driver to also work without Runtime PM.
  1151. */
  1152. pm_runtime_get_noresume(chip->dev);
  1153. ret = axi_dma_resume(chip);
  1154. if (ret < 0)
  1155. goto err_pm_disable;
  1156. axi_dma_hw_init(chip);
  1157. pm_runtime_put(chip->dev);
  1158. ret = dmaenginem_async_device_register(&dw->dma);
  1159. if (ret)
  1160. goto err_pm_disable;
  1161. dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
  1162. dw->hdata->nr_channels);
  1163. return 0;
  1164. err_pm_disable:
  1165. pm_runtime_disable(chip->dev);
  1166. return ret;
  1167. }
  1168. static int dw_remove(struct platform_device *pdev)
  1169. {
  1170. struct axi_dma_chip *chip = platform_get_drvdata(pdev);
  1171. struct dw_axi_dma *dw = chip->dw;
  1172. struct axi_dma_chan *chan, *_chan;
  1173. u32 i;
  1174. /* Enable the clocks before accessing the registers */
  1175. clk_prepare_enable(chip->cfgr_clk);
  1176. clk_prepare_enable(chip->core_clk);
  1177. axi_dma_irq_disable(chip);
  1178. for (i = 0; i < dw->hdata->nr_channels; i++) {
  1179. axi_chan_disable(&chip->dw->chan[i]);
  1180. axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
  1181. }
  1182. axi_dma_disable(chip);
  1183. pm_runtime_disable(chip->dev);
  1184. axi_dma_suspend(chip);
  1185. devm_free_irq(chip->dev, chip->irq, chip);
  1186. list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
  1187. vc.chan.device_node) {
  1188. list_del(&chan->vc.chan.device_node);
  1189. tasklet_kill(&chan->vc.task);
  1190. }
  1191. return 0;
  1192. }
  1193. static const struct dev_pm_ops dw_axi_dma_pm_ops = {
  1194. SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
  1195. };
  1196. static const struct of_device_id dw_dma_of_id_table[] = {
  1197. { .compatible = "snps,axi-dma-1.01a" },
  1198. {}
  1199. };
  1200. MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
  1201. static struct platform_driver dw_driver = {
  1202. .probe = dw_probe,
  1203. .remove = dw_remove,
  1204. .driver = {
  1205. .name = KBUILD_MODNAME,
  1206. .of_match_table = of_match_ptr(dw_dma_of_id_table),
  1207. .pm = &dw_axi_dma_pm_ops,
  1208. },
  1209. };
  1210. module_platform_driver(dw_driver);
  1211. MODULE_LICENSE("GPL v2");
  1212. MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
  1213. MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");