sprd-dma.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314
  1. /*
  2. * Copyright (C) 2017 Spreadtrum Communications Inc.
  3. *
  4. * SPDX-License-Identifier: GPL-2.0
  5. */
  6. #include <linux/clk.h>
  7. #include <linux/dma-mapping.h>
  8. #include <linux/dma/sprd-dma.h>
  9. #include <linux/errno.h>
  10. #include <linux/init.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/io.h>
  13. #include <linux/kernel.h>
  14. #include <linux/module.h>
  15. #include <linux/of.h>
  16. #include <linux/of_dma.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/slab.h>
  20. #include "virt-dma.h"
  21. #define SPRD_DMA_CHN_REG_OFFSET 0x1000
  22. #define SPRD_DMA_CHN_REG_LENGTH 0x40
  23. #define SPRD_DMA_MEMCPY_MIN_SIZE 64
  24. /* DMA global registers definition */
  25. #define SPRD_DMA_GLB_PAUSE 0x0
  26. #define SPRD_DMA_GLB_FRAG_WAIT 0x4
  27. #define SPRD_DMA_GLB_REQ_PEND0_EN 0x8
  28. #define SPRD_DMA_GLB_REQ_PEND1_EN 0xc
  29. #define SPRD_DMA_GLB_INT_RAW_STS 0x10
  30. #define SPRD_DMA_GLB_INT_MSK_STS 0x14
  31. #define SPRD_DMA_GLB_REQ_STS 0x18
  32. #define SPRD_DMA_GLB_CHN_EN_STS 0x1c
  33. #define SPRD_DMA_GLB_DEBUG_STS 0x20
  34. #define SPRD_DMA_GLB_ARB_SEL_STS 0x24
  35. #define SPRD_DMA_GLB_2STAGE_GRP1 0x28
  36. #define SPRD_DMA_GLB_2STAGE_GRP2 0x2c
  37. #define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1))
  38. #define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000
  39. /* DMA channel registers definition */
  40. #define SPRD_DMA_CHN_PAUSE 0x0
  41. #define SPRD_DMA_CHN_REQ 0x4
  42. #define SPRD_DMA_CHN_CFG 0x8
  43. #define SPRD_DMA_CHN_INTC 0xc
  44. #define SPRD_DMA_CHN_SRC_ADDR 0x10
  45. #define SPRD_DMA_CHN_DES_ADDR 0x14
  46. #define SPRD_DMA_CHN_FRG_LEN 0x18
  47. #define SPRD_DMA_CHN_BLK_LEN 0x1c
  48. #define SPRD_DMA_CHN_TRSC_LEN 0x20
  49. #define SPRD_DMA_CHN_TRSF_STEP 0x24
  50. #define SPRD_DMA_CHN_WARP_PTR 0x28
  51. #define SPRD_DMA_CHN_WARP_TO 0x2c
  52. #define SPRD_DMA_CHN_LLIST_PTR 0x30
  53. #define SPRD_DMA_CHN_FRAG_STEP 0x34
  54. #define SPRD_DMA_CHN_SRC_BLK_STEP 0x38
  55. #define SPRD_DMA_CHN_DES_BLK_STEP 0x3c
  56. /* SPRD_DMA_GLB_2STAGE_GRP register definition */
  57. #define SPRD_DMA_GLB_2STAGE_EN BIT(24)
  58. #define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20)
  59. #define SPRD_DMA_GLB_DEST_INT BIT(22)
  60. #define SPRD_DMA_GLB_SRC_INT BIT(20)
  61. #define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19)
  62. #define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18)
  63. #define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17)
  64. #define SPRD_DMA_GLB_FRAG_DONE_TRG BIT(16)
  65. #define SPRD_DMA_GLB_TRG_OFFSET 16
  66. #define SPRD_DMA_GLB_DEST_CHN_MASK GENMASK(13, 8)
  67. #define SPRD_DMA_GLB_DEST_CHN_OFFSET 8
  68. #define SPRD_DMA_GLB_SRC_CHN_MASK GENMASK(5, 0)
  69. /* SPRD_DMA_CHN_INTC register definition */
  70. #define SPRD_DMA_INT_MASK GENMASK(4, 0)
  71. #define SPRD_DMA_INT_CLR_OFFSET 24
  72. #define SPRD_DMA_FRAG_INT_EN BIT(0)
  73. #define SPRD_DMA_BLK_INT_EN BIT(1)
  74. #define SPRD_DMA_TRANS_INT_EN BIT(2)
  75. #define SPRD_DMA_LIST_INT_EN BIT(3)
  76. #define SPRD_DMA_CFG_ERR_INT_EN BIT(4)
  77. /* SPRD_DMA_CHN_CFG register definition */
  78. #define SPRD_DMA_CHN_EN BIT(0)
  79. #define SPRD_DMA_LINKLIST_EN BIT(4)
  80. #define SPRD_DMA_WAIT_BDONE_OFFSET 24
  81. #define SPRD_DMA_DONOT_WAIT_BDONE 1
  82. /* SPRD_DMA_CHN_REQ register definition */
  83. #define SPRD_DMA_REQ_EN BIT(0)
  84. /* SPRD_DMA_CHN_PAUSE register definition */
  85. #define SPRD_DMA_PAUSE_EN BIT(0)
  86. #define SPRD_DMA_PAUSE_STS BIT(2)
  87. #define SPRD_DMA_PAUSE_CNT 0x2000
  88. /* DMA_CHN_WARP_* register definition */
  89. #define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28)
  90. #define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0)
  91. #define SPRD_DMA_WRAP_ADDR_MASK GENMASK(27, 0)
  92. #define SPRD_DMA_HIGH_ADDR_OFFSET 4
  93. /* SPRD_DMA_CHN_INTC register definition */
  94. #define SPRD_DMA_FRAG_INT_STS BIT(16)
  95. #define SPRD_DMA_BLK_INT_STS BIT(17)
  96. #define SPRD_DMA_TRSC_INT_STS BIT(18)
  97. #define SPRD_DMA_LIST_INT_STS BIT(19)
  98. #define SPRD_DMA_CFGERR_INT_STS BIT(20)
  99. #define SPRD_DMA_CHN_INT_STS \
  100. (SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS | \
  101. SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS | \
  102. SPRD_DMA_CFGERR_INT_STS)
  103. /* SPRD_DMA_CHN_FRG_LEN register definition */
  104. #define SPRD_DMA_SRC_DATAWIDTH_OFFSET 30
  105. #define SPRD_DMA_DES_DATAWIDTH_OFFSET 28
  106. #define SPRD_DMA_SWT_MODE_OFFSET 26
  107. #define SPRD_DMA_REQ_MODE_OFFSET 24
  108. #define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
  109. #define SPRD_DMA_WRAP_SEL_DEST BIT(23)
  110. #define SPRD_DMA_WRAP_EN BIT(22)
  111. #define SPRD_DMA_FIX_SEL_OFFSET 21
  112. #define SPRD_DMA_FIX_EN_OFFSET 20
  113. #define SPRD_DMA_LLIST_END BIT(19)
  114. #define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)
  115. /* SPRD_DMA_CHN_BLK_LEN register definition */
  116. #define SPRD_DMA_BLK_LEN_MASK GENMASK(16, 0)
  117. /* SPRD_DMA_CHN_TRSC_LEN register definition */
  118. #define SPRD_DMA_TRSC_LEN_MASK GENMASK(27, 0)
  119. /* SPRD_DMA_CHN_TRSF_STEP register definition */
  120. #define SPRD_DMA_DEST_TRSF_STEP_OFFSET 16
  121. #define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
  122. #define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)
  123. /* SPRD DMA_SRC_BLK_STEP register definition */
  124. #define SPRD_DMA_LLIST_HIGH_MASK GENMASK(31, 28)
  125. #define SPRD_DMA_LLIST_HIGH_SHIFT 28
  126. /* define DMA channel mode & trigger mode mask */
  127. #define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0)
  128. #define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0)
  129. #define SPRD_DMA_INT_TYPE_MASK GENMASK(7, 0)
  130. /* define the DMA transfer step type */
  131. #define SPRD_DMA_NONE_STEP 0
  132. #define SPRD_DMA_BYTE_STEP 1
  133. #define SPRD_DMA_SHORT_STEP 2
  134. #define SPRD_DMA_WORD_STEP 4
  135. #define SPRD_DMA_DWORD_STEP 8
  136. #define SPRD_DMA_SOFTWARE_UID 0
  137. /* dma data width values */
/* dma data width values (log2 encoding: 0 = 1 byte ... 3 = 8 bytes) */
enum sprd_dma_datawidth {
	SPRD_DMA_DATAWIDTH_1_BYTE,
	SPRD_DMA_DATAWIDTH_2_BYTES,
	SPRD_DMA_DATAWIDTH_4_BYTES,
	SPRD_DMA_DATAWIDTH_8_BYTES,
};

/*
 * dma channel hardware configuration.
 *
 * Field order mirrors the channel register block (SPRD_DMA_CHN_* offsets),
 * which also allows an array of these structures to serve as the in-memory
 * link-list consumed by the hardware (see sprd_dma_fill_linklist_desc()).
 */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description: one virt-dma descriptor plus its register image */
struct sprd_dma_desc {
	struct virt_dma_desc vd;
	struct sprd_dma_chn_hw chn_hw;
	enum dma_transfer_direction dir;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan vc;
	void __iomem *chn_base;		/* channel register base */
	struct sprd_dma_linklist linklist;
	struct dma_slave_config slave_cfg;
	u32 chn_num;			/* zero-based channel index */
	u32 dev_id;			/* hardware request UID, 0 = software */
	enum sprd_dma_chn_mode chn_mode;
	enum sprd_dma_trg_mode trg_mode;
	enum sprd_dma_int_type int_type;
	struct sprd_dma_desc *cur_desc;	/* descriptor currently on hardware */
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device dma_dev;
	void __iomem *glb_base;		/* global register base */
	struct clk *clk;
	struct clk *ashb_clk;		/* optional, AGCP controllers only */
	int irq;
	u32 total_chns;
	struct sprd_dma_chn channels[] __counted_by(total_chns);
};

static void sprd_dma_free_desc(struct virt_dma_desc *vd);
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};
  197. static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
  198. {
  199. return container_of(c, struct sprd_dma_chn, vc.chan);
  200. }
/*
 * Recover the controller from one of its channels: first map the dma_chan
 * back to its sprd_dma_chn wrapper, then use that channel's slot in the
 * flexible channels[] array (indexed by chan_id) to locate the device.
 */
static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}
  206. static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
  207. {
  208. return container_of(vd, struct sprd_dma_desc, vd);
  209. }
  210. static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg,
  211. u32 mask, u32 val)
  212. {
  213. u32 orig = readl(sdev->glb_base + reg);
  214. u32 tmp;
  215. tmp = (orig & ~mask) | val;
  216. writel(tmp, sdev->glb_base + reg);
  217. }
  218. static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
  219. u32 mask, u32 val)
  220. {
  221. u32 orig = readl(schan->chn_base + reg);
  222. u32 tmp;
  223. tmp = (orig & ~mask) | val;
  224. writel(tmp, schan->chn_base + reg);
  225. }
  226. static int sprd_dma_enable(struct sprd_dma_dev *sdev)
  227. {
  228. int ret;
  229. ret = clk_prepare_enable(sdev->clk);
  230. if (ret)
  231. return ret;
  232. /*
  233. * The ashb_clk is optional and only for AGCP DMA controller, so we
  234. * need add one condition to check if the ashb_clk need enable.
  235. */
  236. if (!IS_ERR(sdev->ashb_clk))
  237. ret = clk_prepare_enable(sdev->ashb_clk);
  238. return ret;
  239. }
/* Disable the controller clocks (counterpart of sprd_dma_enable()). */
static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/*
	 * Need to check if we need disable the optional ashb_clk for AGCP DMA.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}
  249. static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
  250. {
  251. struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
  252. u32 dev_id = schan->dev_id;
  253. if (dev_id != SPRD_DMA_SOFTWARE_UID) {
  254. u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
  255. SPRD_DMA_GLB_REQ_UID(dev_id);
  256. writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
  257. }
  258. }
  259. static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
  260. {
  261. struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
  262. u32 dev_id = schan->dev_id;
  263. if (dev_id != SPRD_DMA_SOFTWARE_UID) {
  264. u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
  265. SPRD_DMA_GLB_REQ_UID(dev_id);
  266. writel(0, sdev->glb_base + uid_offset);
  267. }
  268. }
/* Clear all pending channel interrupts via the write-1-to-clear bits. */
static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

/* Set the channel-enable bit; transfer starts once a request arrives. */
static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

/* Clear the channel-enable bit. */
static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

/* Issue a software DMA request for channels without a hardware UID. */
static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}
/*
 * Pause (@enable = true) or resume (@enable = false) a channel.
 *
 * On pause, poll the PAUSE status bit for up to SPRD_DMA_PAUSE_CNT
 * iterations so the hardware has quiesced before the caller (e.g.
 * sprd_dma_stop_and_disable()) disables the channel; warn on timeout.
 */
static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}
/* Quiesce a running channel (pause first, then disable); no-op if idle. */
static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	/* Nothing to do when the channel is not enabled. */
	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}
/*
 * Read the current source address.  The low 32 bits live in SRC_ADDR; the
 * high 4 bits are stored in the top nibble of WARP_PTR and are shifted back
 * into position (SPRD_DMA_HIGH_ADDR_OFFSET) to rebuild the 36-bit address.
 */
static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

/* Same scheme as sprd_dma_get_src_addr(), using DES_ADDR and WARP_TO. */
static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}
/*
 * Translate the raw channel interrupt status into an sprd_dma_int_type.
 *
 * The switch matches exact single-bit values; if several status bits are
 * set at once (or none), it falls through to the default warning and
 * reports SPRD_DMA_NO_INT.
 */
static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;

	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;

	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;

	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;

	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;

	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}
  355. static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
  356. {
  357. u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
  358. return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
  359. }
  360. static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
  361. {
  362. struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
  363. u32 val, chn = schan->chn_num + 1;
  364. switch (schan->chn_mode) {
  365. case SPRD_DMA_SRC_CHN0:
  366. val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
  367. val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
  368. val |= SPRD_DMA_GLB_2STAGE_EN;
  369. if (schan->int_type != SPRD_DMA_NO_INT)
  370. val |= SPRD_DMA_GLB_SRC_INT;
  371. sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
  372. break;
  373. case SPRD_DMA_SRC_CHN1:
  374. val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
  375. val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
  376. val |= SPRD_DMA_GLB_2STAGE_EN;
  377. if (schan->int_type != SPRD_DMA_NO_INT)
  378. val |= SPRD_DMA_GLB_SRC_INT;
  379. sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
  380. break;
  381. case SPRD_DMA_DST_CHN0:
  382. val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
  383. SPRD_DMA_GLB_DEST_CHN_MASK;
  384. val |= SPRD_DMA_GLB_2STAGE_EN;
  385. if (schan->int_type != SPRD_DMA_NO_INT)
  386. val |= SPRD_DMA_GLB_DEST_INT;
  387. sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
  388. break;
  389. case SPRD_DMA_DST_CHN1:
  390. val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
  391. SPRD_DMA_GLB_DEST_CHN_MASK;
  392. val |= SPRD_DMA_GLB_2STAGE_EN;
  393. if (schan->int_type != SPRD_DMA_NO_INT)
  394. val |= SPRD_DMA_GLB_DEST_INT;
  395. sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
  396. break;
  397. default:
  398. dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n",
  399. schan->chn_mode);
  400. return -EINVAL;
  401. }
  402. return 0;
  403. }
  404. static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
  405. {
  406. struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
  407. u32 reg, val, req_id;
  408. if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
  409. return;
  410. /* The DMA request id always starts from 0. */
  411. req_id = schan->dev_id - 1;
  412. if (req_id < 32) {
  413. reg = SPRD_DMA_GLB_REQ_PEND0_EN;
  414. val = BIT(req_id);
  415. } else {
  416. reg = SPRD_DMA_GLB_REQ_PEND1_EN;
  417. val = BIT(req_id - 32);
  418. }
  419. sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
  420. }
/*
 * Copy a descriptor's register image into the channel registers.
 *
 * The REQ register is deliberately written last: all other configuration
 * must be in place before a request can trigger the transfer.
 */
static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}
/*
 * Take the next issued descriptor and start it on the hardware.
 *
 * Caller must hold schan->vc.lock.  No-op when nothing is pending.
 * NOTE(review): if the 2-stage configuration fails, cur_desc stays set and
 * the descriptor has already been unlinked -- presumably terminate_all
 * cleans this up; verify against the rest of the driver.
 */
static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Set 2-stage configuration if the channel starts one 2-stage
	 * transfer.
	 */
	if (schan->chn_mode && sprd_dma_set_2stage_config(schan))
		return;

	/*
	 * Copy the DMA configuration from DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_set_pending(schan, true);
	sprd_dma_enable_chn(schan);

	/* Kick the transfer manually when no hardware request line drives it
	 * (software UID), except for 2-stage destination channels which are
	 * triggered by the source stage.
	 */
	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
	    schan->chn_mode != SPRD_DMA_DST_CHN0 &&
	    schan->chn_mode != SPRD_DMA_DST_CHN1)
		sprd_dma_soft_request(schan);
}
/*
 * Fully stop a channel: quiesce the hardware, ungate its request line,
 * clear the UID routing and any latched interrupts, and drop cur_desc.
 * Caller must hold schan->vc.lock.
 */
static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_set_pending(schan, false);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
	schan->cur_desc = NULL;
}
  476. static bool sprd_dma_check_trans_done(enum sprd_dma_int_type int_type,
  477. enum sprd_dma_req_mode req_mode)
  478. {
  479. if (int_type == SPRD_DMA_NO_INT)
  480. return false;
  481. if (int_type >= req_mode + 1)
  482. return true;
  483. else
  484. return false;
  485. }
  486. static irqreturn_t dma_irq_handle(int irq, void *dev_id)
  487. {
  488. struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
  489. u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
  490. struct sprd_dma_chn *schan;
  491. struct sprd_dma_desc *sdesc;
  492. enum sprd_dma_req_mode req_type;
  493. enum sprd_dma_int_type int_type;
  494. bool trans_done = false, cyclic = false;
  495. u32 i;
  496. while (irq_status) {
  497. i = __ffs(irq_status);
  498. irq_status &= (irq_status - 1);
  499. schan = &sdev->channels[i];
  500. spin_lock(&schan->vc.lock);
  501. sdesc = schan->cur_desc;
  502. if (!sdesc) {
  503. spin_unlock(&schan->vc.lock);
  504. return IRQ_HANDLED;
  505. }
  506. int_type = sprd_dma_get_int_type(schan);
  507. req_type = sprd_dma_get_req_type(schan);
  508. sprd_dma_clear_int(schan);
  509. /* cyclic mode schedule callback */
  510. cyclic = schan->linklist.phy_addr ? true : false;
  511. if (cyclic == true) {
  512. vchan_cyclic_callback(&sdesc->vd);
  513. } else {
  514. /* Check if the dma request descriptor is done. */
  515. trans_done = sprd_dma_check_trans_done(int_type, req_type);
  516. if (trans_done == true) {
  517. vchan_cookie_complete(&sdesc->vd);
  518. schan->cur_desc = NULL;
  519. sprd_dma_start(schan);
  520. }
  521. }
  522. spin_unlock(&schan->vc.lock);
  523. }
  524. return IRQ_HANDLED;
  525. }
  526. static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
  527. {
  528. return pm_runtime_get_sync(chan->device->dev);
  529. }
/*
 * Stop the channel and release its descriptors when the client is done.
 *
 * The in-flight descriptor (if any) is captured under the lock but freed
 * outside it, since sprd_dma_free_desc() must not run with vc.lock held.
 */
static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *cur_vd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (schan->cur_desc)
		cur_vd = &schan->cur_desc->vd;

	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	if (cur_vd)
		sprd_dma_free_desc(cur_vd);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}
/*
 * Report the status and residue for @cookie.
 *
 * For a descriptor still sitting in the issued list the residue is taken
 * from its programmed lengths (largest configured granularity wins); for
 * the descriptor currently on hardware it is read back from the channel's
 * address registers.  Anything else reports 0.
 */
static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

		/* Not started yet: whole programmed length remains. */
		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		struct sprd_dma_desc *sdesc = schan->cur_desc;

		/* Running: report the hardware's current transfer address. */
		if (sdesc->dir == DMA_DEV_TO_MEM)
			pos = sprd_dma_get_dst_addr(schan);
		else
			pos = sprd_dma_get_src_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}
/*
 * Move submitted descriptors to the issued list and, if the channel is
 * idle (no cur_desc), start the first one immediately.
 */
static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}
  592. static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
  593. {
  594. switch (buswidth) {
  595. case DMA_SLAVE_BUSWIDTH_1_BYTE:
  596. case DMA_SLAVE_BUSWIDTH_2_BYTES:
  597. case DMA_SLAVE_BUSWIDTH_4_BYTES:
  598. case DMA_SLAVE_BUSWIDTH_8_BYTES:
  599. return ffs(buswidth) - 1;
  600. default:
  601. return -EINVAL;
  602. }
  603. }
  604. static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
  605. {
  606. switch (buswidth) {
  607. case DMA_SLAVE_BUSWIDTH_1_BYTE:
  608. case DMA_SLAVE_BUSWIDTH_2_BYTES:
  609. case DMA_SLAVE_BUSWIDTH_4_BYTES:
  610. case DMA_SLAVE_BUSWIDTH_8_BYTES:
  611. return buswidth;
  612. default:
  613. return -EINVAL;
  614. }
  615. }
/*
 * Build the hardware register image @hw for one transfer segment.
 *
 * @sglen/@sg_index locate this segment inside a scatterlist (used only to
 * chain link-list entries); @src/@dst/@len describe the segment; @flags
 * carries the request mode and interrupt type; @slave_cfg supplies bus
 * widths and maxburst.  Returns 0 or a negative errno for invalid widths.
 */
static int sprd_dma_fill_desc(struct dma_chan *chan,
			      struct sprd_dma_chn_hw *hw,
			      unsigned int sglen, int sg_index,
			      dma_addr_t src, dma_addr_t dst, u32 len,
			      enum dma_transfer_direction dir,
			      unsigned long flags,
			      struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	enum sprd_dma_chn_mode chn_mode = schan->chn_mode;
	u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
	u32 int_mode = flags & SPRD_DMA_INT_MASK;
	int src_datawidth, dst_datawidth, src_step, dst_step;
	u32 temp, fix_mode = 0, fix_en = 0;
	phys_addr_t llist_ptr;

	if (dir == DMA_MEM_TO_DEV) {
		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
		if (src_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid source step\n");
			return src_step;
		}

		/*
		 * For 2-stage transfer, destination channel step can not be 0,
		 * since destination device is AON IRAM.
		 */
		if (chn_mode == SPRD_DMA_DST_CHN0 ||
		    chn_mode == SPRD_DMA_DST_CHN1)
			dst_step = src_step;
		else
			dst_step = SPRD_DMA_NONE_STEP;
	} else {
		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
		if (dst_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
			return dst_step;
		}
		src_step = SPRD_DMA_NONE_STEP;
	}

	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
	if (src_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
		return src_datawidth;
	}

	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
	if (dst_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
		return dst_datawidth;
	}

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;

	/*
	 * wrap_ptr and wrap_to will save the high 4 bits source address and
	 * destination address.
	 */
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;

	/*
	 * If the src step and dst step both are 0 or both are not 0, that means
	 * we can not enable the fix mode. If one is 0 and another one is not,
	 * we can enable the fix mode.
	 */
	if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;	/* fixed side is the source */
		else
			fix_mode = 0;	/* fixed side is the destination */
	}

	hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;

	/* Pack datawidths, request mode, fix mode, wrap and fragment length. */
	temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
	temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
	temp |= schan->linklist.wrap_addr ?
		SPRD_DMA_WRAP_EN | SPRD_DMA_WRAP_SEL_DEST : 0;
	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	/* link-list configuration */
	if (schan->linklist.phy_addr) {
		hw->cfg |= SPRD_DMA_LINKLIST_EN;

		/* link-list index (wraps to 0 after the last segment) */
		temp = sglen ? (sg_index + 1) % sglen : 0;

		/* Next link-list configuration's physical address offset */
		temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;

		/*
		 * Set the link-list pointer point to next link-list
		 * configuration's physical address.
		 */
		llist_ptr = schan->linklist.phy_addr + temp;
		hw->llist_ptr = lower_32_bits(llist_ptr);
		/* High bits of the link-list address ride in SRC_BLK_STEP. */
		hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
			SPRD_DMA_LLIST_HIGH_MASK;

		if (schan->linklist.wrap_addr) {
			hw->wrap_ptr |= schan->linklist.wrap_addr &
				SPRD_DMA_WRAP_ADDR_MASK;
			hw->wrap_to |= dst & SPRD_DMA_WRAP_ADDR_MASK;
		}
	} else {
		hw->llist_ptr = 0;
		hw->src_blk_step = 0;
	}

	hw->frg_step = 0;
	hw->des_blk_step = 0;
	return 0;
}
  731. static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
  732. unsigned int sglen, int sg_index,
  733. dma_addr_t src, dma_addr_t dst, u32 len,
  734. enum dma_transfer_direction dir,
  735. unsigned long flags,
  736. struct dma_slave_config *slave_cfg)
  737. {
  738. struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
  739. struct sprd_dma_chn_hw *hw;
  740. if (!schan->linklist.virt_addr)
  741. return -EINVAL;
  742. hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
  743. sg_index * sizeof(*hw));
  744. return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
  745. dir, flags, slave_cfg);
  746. }
  747. static struct dma_async_tx_descriptor *
  748. sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  749. size_t len, unsigned long flags)
  750. {
  751. struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
  752. struct sprd_dma_desc *sdesc;
  753. struct sprd_dma_chn_hw *hw;
  754. enum sprd_dma_datawidth datawidth;
  755. u32 step, temp;
  756. sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
  757. if (!sdesc)
  758. return NULL;
  759. hw = &sdesc->chn_hw;
  760. hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
  761. hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
  762. hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
  763. hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
  764. hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
  765. SPRD_DMA_HIGH_ADDR_MASK;
  766. hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
  767. SPRD_DMA_HIGH_ADDR_MASK;
  768. if (IS_ALIGNED(len, 8)) {
  769. datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
  770. step = SPRD_DMA_DWORD_STEP;
  771. } else if (IS_ALIGNED(len, 4)) {
  772. datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
  773. step = SPRD_DMA_WORD_STEP;
  774. } else if (IS_ALIGNED(len, 2)) {
  775. datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
  776. step = SPRD_DMA_SHORT_STEP;
  777. } else {
  778. datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
  779. step = SPRD_DMA_BYTE_STEP;
  780. }
  781. temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
  782. temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
  783. temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
  784. temp |= len & SPRD_DMA_FRG_LEN_MASK;
  785. hw->frg_len = temp;
  786. hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
  787. hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
  788. temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
  789. temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
  790. hw->trsf_step = temp;
  791. return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
  792. }
/*
 * Prepare a slave scatter-gather transfer.
 *
 * When @context carries a struct sprd_dma_linklist, the channel is set up in
 * hardware link-list mode: one hardware configuration per sg entry is written
 * into the caller-supplied link-list buffer. With a single sg entry (or no
 * context) only the channel's own configuration is filled.
 */
static struct dma_async_tx_descriptor *
sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
	dma_addr_t src = 0, dst = 0;
	dma_addr_t start_src = 0, start_dst = 0;
	struct sprd_dma_desc *sdesc;
	struct scatterlist *sg;
	u32 len = 0;
	int ret, i;

	/* Only device transfers (MEM_TO_DEV / DEV_TO_MEM) are supported. */
	if (!is_slave_direction(dir))
		return NULL;

	/* Latch (or clear) the caller's link-list buffer description. */
	if (context) {
		struct sprd_dma_linklist *ll_cfg =
			(struct sprd_dma_linklist *)context;

		schan->linklist.phy_addr = ll_cfg->phy_addr;
		schan->linklist.virt_addr = ll_cfg->virt_addr;
		schan->linklist.wrap_addr = ll_cfg->wrap_addr;
	} else {
		schan->linklist.phy_addr = 0;
		schan->linklist.virt_addr = 0;
		schan->linklist.wrap_addr = 0;
	}

	/*
	 * Set channel mode, interrupt mode and trigger mode for 2-stage
	 * transfer.
	 */
	schan->chn_mode =
		(flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
	schan->trg_mode =
		(flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
	schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	sdesc->dir = dir;

	for_each_sg(sgl, sg, sglen, i) {
		len = sg_dma_len(sg);

		/* Memory side comes from the sg entry, device side from config. */
		if (dir == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = slave_cfg->dst_addr;
		} else {
			src = slave_cfg->src_addr;
			dst = sg_dma_address(sg);
		}

		/* Remember the first entry: it seeds the main channel config below. */
		if (!i) {
			start_src = src;
			start_dst = dst;
		}

		/*
		 * The link-list mode needs at least 2 link-list
		 * configurations. If there is only one sg, it doesn't
		 * need to fill the link-list configuration.
		 */
		if (sglen < 2)
			break;

		ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
						  dir, flags, slave_cfg);
		if (ret) {
			kfree(sdesc);
			return NULL;
		}
	}

	/*
	 * Fill the channel's own hardware configuration with the first (or
	 * only) segment; sglen/sg_index of 0 marks it as the non-link-list
	 * part of the setup.
	 */
	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
				 start_dst, len, dir, flags, slave_cfg);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}
  867. static int sprd_dma_slave_config(struct dma_chan *chan,
  868. struct dma_slave_config *config)
  869. {
  870. struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
  871. struct dma_slave_config *slave_cfg = &schan->slave_cfg;
  872. memcpy(slave_cfg, config, sizeof(*config));
  873. return 0;
  874. }
  875. static int sprd_dma_pause(struct dma_chan *chan)
  876. {
  877. struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
  878. unsigned long flags;
  879. spin_lock_irqsave(&schan->vc.lock, flags);
  880. sprd_dma_pause_resume(schan, true);
  881. spin_unlock_irqrestore(&schan->vc.lock, flags);
  882. return 0;
  883. }
  884. static int sprd_dma_resume(struct dma_chan *chan)
  885. {
  886. struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
  887. unsigned long flags;
  888. spin_lock_irqsave(&schan->vc.lock, flags);
  889. sprd_dma_pause_resume(schan, false);
  890. spin_unlock_irqrestore(&schan->vc.lock, flags);
  891. return 0;
  892. }
/*
 * Abort any in-flight transfer and reclaim every descriptor on the channel.
 */
static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *cur_vd = NULL;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	/* Snapshot the in-flight descriptor before stopping the channel. */
	if (schan->cur_desc)
		cur_vd = &schan->cur_desc->vd;

	sprd_dma_stop(schan);

	/* Move all queued descriptors onto a private list while locked. */
	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	/* Free everything outside the lock. */
	if (cur_vd)
		sprd_dma_free_desc(cur_vd);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}
  910. static void sprd_dma_free_desc(struct virt_dma_desc *vd)
  911. {
  912. struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
  913. kfree(sdesc);
  914. }
  915. static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
  916. {
  917. struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
  918. u32 slave_id = *(u32 *)param;
  919. schan->dev_id = slave_id;
  920. return true;
  921. }
/*
 * Probe: discover the channel count, map resources, register the dmaengine
 * device and the OF DMA translator, and bring the controller up under
 * runtime PM. Error paths unwind in reverse order of setup.
 */
static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	u32 chn_count;
	int ret, i;

	/* Prefer a 36-bit DMA mask, falling back to 32-bit on failure. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev, "unable to set coherent mask to 32\n");
			return ret;
		}
	}

	/* Parse new and deprecated dma-channels properties */
	ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count);
	if (ret)
		ret = device_property_read_u32(&pdev->dev, "#dma-channels",
					       &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	/* One device structure with a trailing array of chn_count channels. */
	sdev = devm_kzalloc(&pdev->dev,
			    struct_size(sdev, channels, chn_count),
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* ashb clock is optional for AGCP DMA */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. For AGCP
	 * DMA controller, it can or do not request the irq, which will save
	 * system power without resuming system by DMA interrupts if AGCP DMA
	 * does not request the irq. Thus the DMA interrupts property should
	 * be optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	sdev->glb_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);

	/* Describe the dmaengine device and hook up all callbacks. */
	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
	sdev->dma_dev.device_config = sprd_dma_slave_config;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* get each channel's registers base address. */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	/* Let OF clients translate dma specifiers to channels. */
	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}
/*
 * Remove: tear down in reverse order of probe while holding a runtime-PM
 * reference so the hardware stays accessible during teardown.
 */
static void sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;

	pm_runtime_get_sync(&pdev->dev);

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

	/* Unlink every channel and kill its virt-chan tasklet. */
	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
/* OF match table for this driver. */
static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, sprd_dma_match);
  1060. static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
  1061. {
  1062. struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
  1063. sprd_dma_disable(sdev);
  1064. return 0;
  1065. }
  1066. static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
  1067. {
  1068. struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
  1069. int ret;
  1070. ret = sprd_dma_enable(sdev);
  1071. if (ret)
  1072. dev_err(sdev->dma_dev.dev, "enable dma failed\n");
  1073. return ret;
  1074. }
/* Runtime-PM callbacks only; no system sleep ops are provided. */
static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};
/* Platform driver glue and module metadata. */
static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove_new = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};

module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");