stm32-dma.c

/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		((n & 0x3) << 6)
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					 | STM32_DMA_SCR_MINC \
					 | STM32_DMA_SCR_PINCOS \
					 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					 | STM32_DMA_SCR_TEIE \
					 | STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		(n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					 | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL	0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL		0x03

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter
 * gather at boundary. Thus it's safer to round down this value on FIFO
 * size (16 Bytes)
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	struct reset_control *rst;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
{
	return kzalloc(sizeof(struct stm32_dma_desc) +
		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}
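
/*
 * Pick the widest memory bus width (4 bytes only when the FIFO threshold is
 * "full", otherwise at most 2 bytes) that still divides the buffer length
 * evenly, halving down to 1 byte so no partial data item is left over.
 */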
static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	return max_width;
}

static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If number of beats fit in several whole bursts
			 * this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/*
	 * Buffer or period length has to be aligned on FIFO depth.
	 * Otherwise bytes may be stuck within FIFO at buffer or period
	 * length.
	 */
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}
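
/*
 * Choose the largest burst (starting from max_burst) that both fits within
 * the buffer length and is compatible with the FIFO threshold, halving it
 * down to STM32_DMA_MIN_BURST and falling back to 0 (single transfers) if
 * no burst size works.
 */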
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}

static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}

static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}
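
/*
 * Clear the EN bit of the stream configuration register and poll (for at
 * most 5 seconds) until it reads back as zero; the hardware may keep EN set
 * until the ongoing transfer has drained, hence the polling loop.
 */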
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	u32 dma_scr, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);

		do {
			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
			dma_scr &= STM32_DMA_SCR_EN;
			if (!dma_scr)
				break;

			if (time_after_eq(jiffies, timeout)) {
				dev_err(chan2dev(chan), "%s: timeout!\n",
					__func__);
				return -EBUSY;
			}
			cond_resched();
		} while (1);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
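
/*
 * Program the stream registers from the next sg_req of the current (or
 * newly popped) descriptor, clear any stale interrupt flags, then set the
 * EN bit to start the hardware. For cyclic transfers the following period
 * is pre-programmed via stm32_dma_configure_next_sg().
 */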
static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
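
/*
 * In double buffer mode (SCR.DBM set) the hardware alternates between
 * SM0AR and SM1AR; SCR.CT indicates which one is currently in use, so the
 * memory address register *not* in use is rewritten here with the address
 * of the next period while the stream keeps running.
 */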
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}
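
/*
 * Per-stream interrupt handler: acknowledge and handle transfer-complete,
 * half-transfer and FIFO-error flags individually, then report any
 * remaining (error) flags still set in the status.
 */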
static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE)
			stm32_dma_handle_chan_done(chan);
		status &= ~STM32_DMA_TCI;
	}
	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}
	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "FIFO Error\n");
		else
			dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
	}
	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
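
/*
 * Translate the dmaengine slave config into SCR/SFCR settings for one
 * transfer chunk: direction, peripheral and memory data sizes, peripheral
 * and memory bursts, and the FIFO threshold. The memory-side width and
 * burst are derived from the chunk length and the configured threshold.
 */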
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, threshold;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	threshold = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set memory burst size */
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set memory burst size */
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = stm32_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}
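
/*
 * Cyclic transfers use the hardware circular mode when the buffer holds a
 * single period, and double buffer mode otherwise, so that the next period
 * can be programmed while the current one is in flight.
 */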
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * We allow to take more number of requests till DMA is
	 * not started. The driver will loop over all requests.
	 * Once DMA is started then new requests can be queued only after
	 * terminating the DMA.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = stm32_dma_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = stm32_dma_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
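
/*
 * SNDTR counts remaining data items of the programmed peripheral data size
 * (PSIZE), so the number of bytes left is NDTR shifted left by that width.
 */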
static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}

static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue = 0;
	int i;

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes from
	 * NDTR
	 */
	if (chan->desc->cyclic && next_sg == 0) {
		residue = stm32_dma_get_remaining_bytes(chan);
		goto end;
	}

	/*
	 * For all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining periods/sg to be
	 * transferred
	 */
	for (i = next_sg; i < desc->num_sgs; i++)
		residue += desc->sg_req[i].len;
	residue += stm32_dma_get_remaining_bytes(chan);

end:
	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}

static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;
	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		clk_disable_unprepare(dmadev->clk);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	clk_disable_unprepare(dmadev->clk);

	vchan_free_chan_resources(to_virt_chan(c));
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
}
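
/*
 * Device tree translation: the binding uses four cells per request --
 * channel id, request line, a mask applied to the stream configuration
 * register, and a feature mask (FIFO threshold). An illustrative client
 * entry (the values below are only an example, not taken from a real DT)
 * could look like:
 *	dmas = <&dma1 2 4 0x10400 0x3>;
 */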
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(dmadev->clk);
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(dmadev->rst)) {
		reset_control_assert(dmadev->rst);
		udelay(2);
		reset_control_deassert(dmadev->rst);
	}

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		return ret;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_unregister;
		}
		chan->irq = res->start;
		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);

	return ret;
}

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
	},
};

static int __init stm32_dma_init(void)
{
	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
}
subsys_initcall(stm32_dma_init);