/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 * Derived in part from arch/arm/mach-sa1100/dma.c,
 * Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20

#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
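
/*
 * Bits 7:4 of DDAR select the peripheral request (the DDAR_Ser* values
 * above).  The upper bits hold the peripheral's data register address,
 * which sa11x0_dma_device_config() packs in from the slave config.
 */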
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->vc.lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}
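
/*
 * The SA-11x0 DMA engine double-buffers each physical channel: buffers
 * A and B have their own address/count registers (DBSA/DBTA, DBSB/DBTB)
 * and start bits, with DCSR_BIU indicating which buffer the hardware is
 * using.  This lets us program the idle buffer while the other one is
 * still transferring.
 */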
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}
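
/*
 * Account one completed chunk.  A non-cyclic descriptor completes once
 * all of its chunks have transferred; a cyclic descriptor is never
 * completed, it raises vchan_cyclic_callback() and wraps sg_done back
 * to the start of the scatter list instead.
 */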
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}
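
/*
 * The tasklet performs the physical channel scheduling: it first
 * releases any pchan whose vchan has run out of descriptors, then
 * assigns free pchans to vchans waiting on chan_pending, and finally
 * kicks off the first transfer on each newly paired channel.
 */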
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}
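
/*
 * Read back the source address of whichever buffer the engine is
 * currently transferring from, so that tx_status() can work out how
 * far through a descriptor the hardware has progressed.
 */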
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
				&c->vc, &addr);
			return NULL;
		}
	}

	txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}
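
/*
 * A worked example of the chunk splitting above: a 12 KiB (0x3000)
 * segment exceeds DMA_MAX_SIZE (0x1fff), so mult = DIV_ROUND_UP(0x3000,
 * 0x1ffc) = 2 and each chunk becomes (0x3000 / 2) & ~3 = 0x1800 bytes:
 * two equal word-aligned chunks rather than one maximal chunk plus a
 * small remainder.
 */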
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = true;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
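
/*
 * The cyclic buffer is laid out as (size / period) periods of sgperiod
 * chunks each, so sglen is always a whole number of periods and the
 * hardware wraps cleanly at the end of the buffer.
 */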
static int sa11x0_dma_device_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
		&c->vc, &addr, width, maxburst);

	/*
	 * Pack the device address into DDAR: address bits 31:28 keep
	 * their position, bits 21:2 shift up into DDAR bits 27:8.
	 */
	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}
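
/*
 * A minimal client-side sketch of configuring one of these channels
 * before preparing transfers.  The FIFO address is hypothetical; for a
 * receive channel (DDAR_RW set) the src_* fields apply, otherwise the
 * dst_* fields do:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= SSP_FIFO_PHYS,  (hypothetical address)
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * Only 1- and 2-byte widths with bursts of 4 or 8 are accepted above,
 * matching the DDAR_DW and DDAR_BS hardware capabilities.
 */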
static int sa11x0_dma_device_pause(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_resume(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);

	p = c->phy;
	if (p) {
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
		/* vchan is assigned to a pchan - stop the channel */
		writel(DCSR_RUN | DCSR_IE |
		       DCSR_STRTA | DCSR_DONEA |
		       DCSR_STRTB | DCSR_DONEB,
		       p->base + DMA_DCSR_C);

		if (p->txd_load) {
			if (p->txd_load != p->txd_done)
				list_add_tail(&p->txd_load->vd.node, &head);
			p->txd_load = NULL;
		}
		if (p->txd_done) {
			list_add_tail(&p->txd_done->vd.node, &head);
			p->txd_done = NULL;
		}
		c->phy = NULL;
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};

static const struct dma_slave_map sa11x0_dma_map[] = {
	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
};
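
/*
 * A minimal sketch of how a client obtains one of these channels by
 * name through the filter function exported below (modern users go via
 * dma_request_chan() and the slave map above instead):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 */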
static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_config = sa11x0_dma_device_config;
	dmadev->device_pause = sa11x0_dma_device_pause;
	dmadev->device_resume = sa11x0_dma_device_resume;
	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->slave.filter.fn = sa11x0_dma_filter_fn;
	d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
	d->slave.filter.map = sa11x0_dma_map;

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
err_ioremap:
	kfree(d);
err_alloc:
	return ret;
}

static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;

		/*
		 * Save the buffer registers with the in-use buffer first,
		 * swapping the start bits to match, so that resume always
		 * restarts the transfer from buffer A.
		 */
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}

		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}
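
/*
 * Restore the state saved by sa11x0_dma_suspend().  Only channels that
 * still have a descriptor in flight are reprogrammed; writing the saved
 * DCSR bits restarts the transfer from the normalised buffer A.
 */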
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= sa11x0_dma_remove,
};

bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");