img-mdc-dma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * IMG Multi-threaded DMA Controller (MDC)
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS	32

#define MDC_GENERAL_CONFIG	0x000
#define MDC_GENERAL_CONFIG_LIST_IEN	BIT(31)
#define MDC_GENERAL_CONFIG_IEN	BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT	BIT(28)
#define MDC_GENERAL_CONFIG_INC_W	BIT(12)
#define MDC_GENERAL_CONFIG_INC_R	BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W	BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT	4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK	0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R	BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT	0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK	0x7

#define MDC_READ_PORT_CONFIG	0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT	28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT	24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT	16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT	4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK	0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE	BIT(1)

#define MDC_READ_ADDRESS	0x008

#define MDC_WRITE_ADDRESS	0x00c

#define MDC_TRANSFER_SIZE	0x010
#define MDC_TRANSFER_SIZE_MASK	0xffffff

#define MDC_LIST_NODE_ADDRESS	0x014

#define MDC_CMDS_PROCESSED	0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT	16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK	0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE	BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT	0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK	0x3f

#define MDC_CONTROL_AND_STATUS	0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL	BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN	BIT(4)
#define MDC_CONTROL_AND_STATUS_EN	BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE	0x030

#define MDC_GLOBAL_CONFIG_A	0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT	16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK	0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT	8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK	0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT	0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK	0xff

struct mdc_hw_list_desc {
	u32 gen_conf;
	u32 readport_conf;
	u32 read_addr;
	u32 write_addr;
	u32 xfer_size;
	u32 node_addr;
	u32 cmds_done;
	u32 ctrl_status;
	/*
	 * Not part of the list descriptor, but instead used by the CPU to
	 * traverse the list.
	 */
	struct mdc_hw_list_desc *next_desc;
};
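
/*
 * Note: the first eight u32 fields above mirror, in order, the per-channel
 * registers at offsets 0x000-0x01c (MDC_GENERAL_CONFIG through
 * MDC_CONTROL_AND_STATUS), which is the in-memory list-node layout the
 * hardware fetches once MDC_LIST_NODE_ADDRESS has been written. As a sketch
 * of one node (values illustrative): a 4 KiB copy would have
 * xfer_size = 4096 - 1 and node_addr holding the physical address of the next
 * node, or 0 to terminate the list.
 */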

struct mdc_tx_desc {
	struct mdc_chan *chan;
	struct virt_dma_desc vd;
	dma_addr_t list_phys;
	struct mdc_hw_list_desc *list;
	bool cyclic;
	bool cmd_loaded;
	unsigned int list_len;
	unsigned int list_period_len;
	size_t list_xfer_size;
	unsigned int list_cmds_done;
};

struct mdc_chan {
	struct mdc_dma *mdma;
	struct virt_dma_chan vc;
	struct dma_slave_config config;
	struct mdc_tx_desc *desc;
	int irq;
	unsigned int periph;
	unsigned int thread;
	unsigned int chan_nr;
};

struct mdc_dma_soc_data {
	void (*enable_chan)(struct mdc_chan *mchan);
	void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
	struct dma_device dma_dev;
	void __iomem *regs;
	struct clk *clk;
	struct dma_pool *desc_pool;
	struct regmap *periph_regs;
	spinlock_t lock;
	unsigned int nr_threads;
	unsigned int nr_channels;
	unsigned int bus_width;
	unsigned int max_burst_mult;
	unsigned int max_xfer_size;
	const struct mdc_dma_soc_data *soc;
	struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
	return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
	writel(val, mdma->regs + reg);
}

static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
	return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
	mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}
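
/*
 * Each channel occupies a 0x40-byte window of register space, so channel n's
 * copy of a register lives at n * 0x040 + <register offset>. For example,
 * channel 2's MDC_READ_PORT_CONFIG sits at 0x080 + 0x004 = 0x084.
 */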

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
	return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
	struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

	return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
	return mdma->dma_dev.dev;
}

static inline unsigned int to_mdc_width(unsigned int bytes)
{
	return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
				      unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
				       unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}
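
/*
 * The access widths in MDC_GENERAL_CONFIG are encoded as log2 of the width in
 * bytes: to_mdc_width(1) == 0, to_mdc_width(2) == 1, to_mdc_width(4) == 2 and
 * to_mdc_width(8) == 3. This assumes the caller passes a power-of-two width,
 * which holds for the slave widths accepted by mdc_check_slave_width() and
 * for the bus widths this driver probes.
 */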

static void mdc_list_desc_config(struct mdc_chan *mchan,
				 struct mdc_hw_list_desc *ldesc,
				 enum dma_transfer_direction dir,
				 dma_addr_t src, dma_addr_t dst, size_t len)
{
	struct mdc_dma *mdma = mchan->mdma;
	unsigned int max_burst, burst_size;

	ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	ldesc->readport_conf =
		(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	ldesc->read_addr = src;
	ldesc->write_addr = dst;
	ldesc->xfer_size = len - 1;
	ldesc->node_addr = 0;
	ldesc->cmds_done = 0;
	ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
		MDC_CONTROL_AND_STATUS_EN;
	ldesc->next_desc = NULL;

	if (IS_ALIGNED(dst, mdma->bus_width) &&
	    IS_ALIGNED(src, mdma->bus_width))
		max_burst = mdma->bus_width * mdma->max_burst_mult;
	else
		max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

	if (dir == DMA_MEM_TO_DEV) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
		burst_size = min(max_burst, mchan->config.dst_maxburst *
				 mchan->config.dst_addr_width);
	} else if (dir == DMA_DEV_TO_MEM) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mchan->config.src_addr_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = min(max_burst, mchan->config.src_maxburst *
				 mchan->config.src_addr_width);
	} else {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
			MDC_GENERAL_CONFIG_INC_W;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = max_burst;
	}
	ldesc->readport_conf |= (burst_size - 1) <<
		MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}
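
/*
 * Worked example of the burst-size computation above (numbers illustrative
 * only): with bus_width = 4 and max_burst_mult = 16, aligned buffers give
 * max_burst = 64 bytes. A DMA_MEM_TO_DEV client configured with
 * dst_addr_width = 4 and dst_maxburst = 8 requests 32 bytes per burst, so
 * burst_size = min(64, 32) = 32 and the value programmed into
 * MDC_READ_PORT_CONFIG is burst_size - 1 = 31.
 */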

static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
	struct mdc_dma *mdma = mdesc->chan->mdma;
	struct mdc_hw_list_desc *curr, *next;
	dma_addr_t curr_phys, next_phys;

	curr = mdesc->list;
	curr_phys = mdesc->list_phys;
	while (curr) {
		next = curr->next_desc;
		next_phys = curr->node_addr;
		dma_pool_free(mdma->desc_pool, curr, curr_phys);
		curr = next;
		curr_phys = next_phys;
	}
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
	struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

	mdc_list_desc_free(mdesc);
	kfree(mdesc);
}

static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	if (!len)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->list_xfer_size = len;

	while (len > 0) {
		size_t xfer_size;

		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
		if (!curr)
			goto free_desc;

		if (prev) {
			prev->node_addr = curr_phys;
			prev->next_desc = curr;
		} else {
			mdesc->list_phys = curr_phys;
			mdesc->list = curr;
		}

		xfer_size = min_t(size_t, mdma->max_xfer_size, len);

		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
				     xfer_size);

		prev = curr;

		mdesc->list_len++;
		src += xfer_size;
		dest += xfer_size;
		len -= xfer_size;
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static int mdc_check_slave_width(struct mdc_chan *mchan,
				 enum dma_transfer_direction dir)
{
	enum dma_slave_buswidth width;

	if (dir == DMA_MEM_TO_DEV)
		width = mchan->config.dst_addr_width;
	else
		width = mchan->config.src_addr_width;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		break;
	default:
		return -EINVAL;
	}

	if (width > mchan->mdma->bus_width)
		return -EINVAL;

	return 0;
}

static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	if (!buf_len && !period_len)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr,
						     xfer_size);
			}

			prev = curr;

			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;
	unsigned int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);
			}

			prev = curr;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static void mdc_issue_desc(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;
	struct virt_dma_desc *vd;
	struct mdc_tx_desc *mdesc;
	u32 val;

	vd = vchan_next_desc(&mchan->vc);
	if (!vd)
		return;

	list_del(&vd->node);

	mdesc = to_mdc_desc(&vd->tx);
	mchan->desc = mdesc;

	dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
		mchan->chan_nr);

	mdma->soc->enable_chan(mchan);

	val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
	val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
	val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
	mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
	val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
	val |= MDC_CONTROL_AND_STATUS_LIST_EN;
	mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
		mdc_issue_desc(mchan);
	spin_unlock_irqrestore(&mchan->vc.lock, flags);
}

static enum dma_status mdc_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	size_t bytes = 0;
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	vd = vchan_find_desc(&mchan->vc, cookie);
	if (vd) {
		mdesc = to_mdc_desc(&vd->tx);
		bytes = mdesc->list_xfer_size;
	} else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
		struct mdc_hw_list_desc *ldesc;
		u32 val1, val2, done, processed, residue;
		int i, cmds;

		mdesc = mchan->desc;

		/*
		 * Determine the number of commands that haven't been
		 * processed (handled by the IRQ handler) yet.
		 */
		do {
			val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
			residue = mdc_chan_readl(mchan,
						 MDC_ACTIVE_TRANSFER_SIZE);
			val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
		} while (val1 != val2);

		done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
		cmds = (done - processed) %
			(MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

		/*
		 * If the command loaded event hasn't been processed yet, then
		 * the difference above includes an extra command.
		 */
		if (!mdesc->cmd_loaded)
			cmds--;
		else
			cmds += mdesc->list_cmds_done;

		bytes = mdesc->list_xfer_size;
		ldesc = mdesc->list;
		for (i = 0; i < cmds; i++) {
			bytes -= ldesc->xfer_size + 1;
			ldesc = ldesc->next_desc;
		}
		if (ldesc) {
			if (residue != MDC_TRANSFER_SIZE_MASK)
				bytes -= ldesc->xfer_size - residue;
			else
				bytes -= ldesc->xfer_size + 1;
		}
	}
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	dma_set_residue(txstate, bytes);

	return ret;
}
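
/*
 * Residue example for the calculation above (numbers illustrative only): for
 * a 12288-byte transfer split into three 4096-byte commands, with one command
 * already complete (cmds = 1) and MDC_ACTIVE_TRANSFER_SIZE reading 3000 for
 * the command in flight, the reported residue is
 * 12288 - 4096 - (4095 - 3000) = 7097 bytes still to be transferred.
 */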

static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
{
	u32 val, processed, done1, done2;
	unsigned int ret;

	val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
	processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
		MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
	/*
	 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
	 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
	 * didn't miss a command completion.
	 */
	do {
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

		done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;

		val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
			  MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
			 MDC_CMDS_PROCESSED_INT_ACTIVE);
		val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;

		mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);

		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

		done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
	} while (done1 != done2);

	if (done1 >= processed)
		ret = done1 - processed;
	else
		ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
			processed) + done1;

	return ret;
}
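
/*
 * The CMDS_DONE and CMDS_PROCESSED counters are 6 bits wide, so they wrap at
 * 64. Example of the wrap handling above: if processed == 60 and done1 == 3,
 * the number of new events is (64 - 60) + 3 = 7.
 */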

static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&mchan->vc.lock, flags);

	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
			MDC_CONTROL_AND_STATUS);

	if (mchan->desc) {
		vchan_terminate_vdesc(&mchan->desc->vd);
		mchan->desc = NULL;
	}
	vchan_get_all_descriptors(&mchan->vc, &head);

	mdc_get_new_events(mchan);

	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	vchan_dma_desc_free_list(&mchan->vc, &head);

	return 0;
}

static void mdc_synchronize(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);

	vchan_synchronize(&mchan->vc);
}

static int mdc_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	mchan->config = *config;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	return 0;
}
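
/*
 * Rough sketch of how a slave client typically drives this controller through
 * the generic dmaengine API (device names, widths and lengths below are
 * illustrative, not taken from this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);	// reaches mdc_slave_config()
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);		// reaches mdc_issue_pending()
 */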

static int mdc_alloc_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct device *dev = mdma2dev(mchan->mdma);

	return pm_runtime_get_sync(dev);
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct device *dev = mdma2dev(mdma);

	mdc_terminate_all(chan);
	mdma->soc->disable_chan(mchan);
	pm_runtime_put(dev);
}

static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
	struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
	struct mdc_tx_desc *mdesc;
	unsigned int i, new_events;

	spin_lock(&mchan->vc.lock);

	dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

	new_events = mdc_get_new_events(mchan);

	if (!new_events)
		goto out;

	mdesc = mchan->desc;
	if (!mdesc) {
		dev_warn(mdma2dev(mchan->mdma),
			 "IRQ with no active descriptor on channel %d\n",
			 mchan->chan_nr);
		goto out;
	}

	for (i = 0; i < new_events; i++) {
		/*
		 * The first interrupt in a transfer indicates that the
		 * command list has been loaded, not that a command has
		 * been completed.
		 */
		if (!mdesc->cmd_loaded) {
			mdesc->cmd_loaded = true;
			continue;
		}

		mdesc->list_cmds_done++;
		if (mdesc->cyclic) {
			mdesc->list_cmds_done %= mdesc->list_len;
			if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
				vchan_cyclic_callback(&mdesc->vd);
		} else if (mdesc->list_cmds_done == mdesc->list_len) {
			mchan->desc = NULL;
			vchan_cookie_complete(&mdesc->vd);
			mdc_issue_desc(mchan);
			break;
		}
	}
out:
	spin_unlock(&mchan->vc.lock);

	return IRQ_HANDLED;
}

static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	struct mdc_dma *mdma = ofdma->of_dma_data;
	struct dma_chan *chan;

	if (dma_spec->args_count != 3)
		return NULL;

	list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
		struct mdc_chan *mchan = to_mdc_chan(chan);

		if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
			continue;
		if (dma_get_slave_channel(chan)) {
			mchan->periph = dma_spec->args[0];
			mchan->thread = dma_spec->args[2];
			return chan;
		}
	}

	return NULL;
}
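
/*
 * The xlate above expects a 3-cell DMA specifier: cell 0 is the peripheral
 * number to route to the channel, cell 1 is a bitmask of channels the request
 * may use, and cell 2 is the thread to run the transfer on. An illustrative
 * (not authoritative) consumer node might therefore contain:
 *
 *	dmas = <&mdc 10 0xffffffff 0>;
 *	dma-names = "tx";
 */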

#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)	(0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch)	(8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK	0x3f

static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   mchan->periph <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
	.enable_chan = pistachio_mdc_enable_chan,
	.disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
	{ .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int img_mdc_runtime_suspend(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);

	clk_disable_unprepare(mdma->clk);

	return 0;
}

static int img_mdc_runtime_resume(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);

	return clk_prepare_enable(mdma->clk);
}

static int mdc_dma_probe(struct platform_device *pdev)
{
	struct mdc_dma *mdma;
	unsigned int i;
	u32 val;
	int ret;

	mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
	if (!mdma)
		return -ENOMEM;
	platform_set_drvdata(pdev, mdma);

	mdma->soc = of_device_get_match_data(&pdev->dev);

	mdma->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdma->regs))
		return PTR_ERR(mdma->regs);

	mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "img,cr-periph");
	if (IS_ERR(mdma->periph_regs))
		return PTR_ERR(mdma->periph_regs);

	mdma->clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(mdma->clk))
		return PTR_ERR(mdma->clk);

	dma_cap_zero(mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

	val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
	mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
		MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
	mdma->nr_threads =
		1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
		      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
	mdma->bus_width =
		(1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
		       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
	/*
	 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
	 * are supported, this makes it possible for the value reported in
	 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
	 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
	 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
	 * ambiguity, restrict transfer sizes to one bus-width less than the
	 * actual maximum.
	 */
	mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

	of_property_read_u32(pdev->dev.of_node, "dma-channels",
			     &mdma->nr_channels);
	ret = of_property_read_u32(pdev->dev.of_node,
				   "img,max-burst-multiplier",
				   &mdma->max_burst_mult);
	if (ret)
		return ret;

	mdma->dma_dev.dev = &pdev->dev;
	mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
	mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
	mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
	mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
	mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
	mdma->dma_dev.device_tx_status = mdc_tx_status;
	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
	mdma->dma_dev.device_synchronize = mdc_synchronize;
	mdma->dma_dev.device_config = mdc_slave_config;

	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	for (i = 1; i <= mdma->bus_width; i <<= 1) {
		mdma->dma_dev.src_addr_widths |= BIT(i);
		mdma->dma_dev.dst_addr_widths |= BIT(i);
	}

	INIT_LIST_HEAD(&mdma->dma_dev.channels);
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		mchan->mdma = mdma;
		mchan->chan_nr = i;
		mchan->irq = platform_get_irq(pdev, i);
		if (mchan->irq < 0)
			return mchan->irq;

		ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
				       IRQ_TYPE_LEVEL_HIGH,
				       dev_name(&pdev->dev), mchan);
		if (ret < 0)
			return ret;

		mchan->vc.desc_free = mdc_desc_free;
		vchan_init(&mchan->vc, &mdma->dma_dev);
	}

	mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
					   sizeof(struct mdc_hw_list_desc),
					   4, 0);
	if (!mdma->desc_pool)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = img_mdc_runtime_resume(&pdev->dev);
		if (ret)
			return ret;
	}

	ret = dma_async_device_register(&mdma->dma_dev);
	if (ret)
		goto suspend;

	ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
	if (ret)
		goto unregister;

	dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
		 mdma->nr_channels, mdma->nr_threads);

	return 0;

unregister:
	dma_async_device_unregister(&mdma->dma_dev);
suspend:
	if (!pm_runtime_enabled(&pdev->dev))
		img_mdc_runtime_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static void mdc_dma_remove(struct platform_device *pdev)
{
	struct mdc_dma *mdma = platform_get_drvdata(pdev);
	struct mdc_chan *mchan, *next;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdma->dma_dev);

	list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&mchan->vc.chan.device_node);

		devm_free_irq(&pdev->dev, mchan->irq, mchan);

		tasklet_kill(&mchan->vc.task);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		img_mdc_runtime_suspend(&pdev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int img_mdc_suspend_late(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);
	int i;

	/* Check that all channels are idle */
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		if (unlikely(mchan->desc))
			return -EBUSY;
	}

	return pm_runtime_force_suspend(dev);
}

static int img_mdc_resume_early(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_mdc_pm_ops = {
	SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend,
			   img_mdc_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late,
				     img_mdc_resume_early)
};

static struct platform_driver mdc_dma_driver = {
	.driver = {
		.name = "img-mdc-dma",
		.pm = &img_mdc_pm_ops,
		.of_match_table = of_match_ptr(mdc_dma_of_match),
	},
	.probe = mdc_dma_probe,
	.remove_new = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");