/* scrape residue (file-size banner and line-number gutter) removed */
// SPDX-License-Identifier: GPL-2.0
//
// Renesas R-Car Audio DMAC support
//
// Copyright (C) 2015 Renesas Electronics Corp.
// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>

#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00	/* source address (written with dma->src_addr) */
#define PDMADAR		0x04	/* destination address (written with dma->dst_addr) */
#define PDMACHCR	0x0c	/* channel control */

/* PDMACHCR */
#define PDMACHCR_DE		(1 << 0)	/* DMA enable bit */
/* State for a DMAEngine ("audmac") backed transfer */
struct rsnd_dmaen {
	struct dma_chan *chan;		/* channel from rsnd_dmaen_request_channel() */
	dma_cookie_t cookie;		/* cookie returned by dmaengine_submit() */
	unsigned int dma_len;		/* cyclic buffer length in bytes */
};

/* State for an Audio DMAC peri-peri ("audmac-pp") transfer */
struct rsnd_dmapp {
	int dmapp_id;			/* index into the peri-peri register block */
	u32 chcr;			/* precomputed PDMACHCR value (includes DE) */
};

/* Common DMA module; exactly one member of the union is in use */
struct rsnd_dma {
	struct rsnd_mod mod;
	struct rsnd_mod *mod_from;	/* transfer source module (NULL = memory) */
	struct rsnd_mod *mod_to;	/* transfer destination module (NULL = memory) */
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	union {
		struct rsnd_dmaen en;	/* DMAEngine case */
		struct rsnd_dmapp pp;	/* peri-peri case */
	} dma;
};

/* Per-priv DMA controller bookkeeping */
struct rsnd_dma_ctrl {
	void __iomem *base;		/* "audmapp" register region */
	int dmaen_num;			/* attached DMAEngine channel count */
	int dmapp_num;			/* attached peri-peri channel count */
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)
/*
 * for DEBUG
 *
 * Dummy module standing in for the memory endpoint so that
 * connection debug prints can show "mem" instead of NULL.
 */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};
  53. /*
  54. * Audio DMAC
  55. */
  56. static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
  57. struct rsnd_dai_stream *io)
  58. {
  59. if (rsnd_io_is_working(io))
  60. rsnd_dai_period_elapsed(io);
  61. }
  62. static void rsnd_dmaen_complete(void *data)
  63. {
  64. struct rsnd_mod *mod = data;
  65. rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
  66. }
  67. static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
  68. struct rsnd_mod *mod_from,
  69. struct rsnd_mod *mod_to)
  70. {
  71. if ((!mod_from && !mod_to) ||
  72. (mod_from && mod_to))
  73. return NULL;
  74. if (mod_from)
  75. return rsnd_mod_dma_req(io, mod_from);
  76. else
  77. return rsnd_mod_dma_req(io, mod_to);
  78. }
  79. static int rsnd_dmaen_stop(struct rsnd_mod *mod,
  80. struct rsnd_dai_stream *io,
  81. struct rsnd_priv *priv)
  82. {
  83. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  84. struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
  85. if (dmaen->chan)
  86. dmaengine_terminate_all(dmaen->chan);
  87. return 0;
  88. }
  89. static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
  90. struct rsnd_dai_stream *io,
  91. struct rsnd_priv *priv)
  92. {
  93. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  94. struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
  95. /*
  96. * DMAEngine release uses mutex lock.
  97. * Thus, it shouldn't be called under spinlock.
  98. * Let's call it under nolock_start
  99. */
  100. if (dmaen->chan)
  101. dma_release_channel(dmaen->chan);
  102. dmaen->chan = NULL;
  103. return 0;
  104. }
  105. static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
  106. struct rsnd_dai_stream *io,
  107. struct rsnd_priv *priv)
  108. {
  109. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  110. struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
  111. struct device *dev = rsnd_priv_to_dev(priv);
  112. if (dmaen->chan) {
  113. dev_err(dev, "it already has dma channel\n");
  114. return -EIO;
  115. }
  116. /*
  117. * DMAEngine request uses mutex lock.
  118. * Thus, it shouldn't be called under spinlock.
  119. * Let's call it under nolock_start
  120. */
  121. dmaen->chan = rsnd_dmaen_request_channel(io,
  122. dma->mod_from,
  123. dma->mod_to);
  124. if (IS_ERR_OR_NULL(dmaen->chan)) {
  125. dmaen->chan = NULL;
  126. dev_err(dev, "can't get dma channel\n");
  127. return -EIO;
  128. }
  129. return 0;
  130. }
  131. static int rsnd_dmaen_start(struct rsnd_mod *mod,
  132. struct rsnd_dai_stream *io,
  133. struct rsnd_priv *priv)
  134. {
  135. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  136. struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
  137. struct snd_pcm_substream *substream = io->substream;
  138. struct device *dev = rsnd_priv_to_dev(priv);
  139. struct dma_async_tx_descriptor *desc;
  140. struct dma_slave_config cfg = {};
  141. int is_play = rsnd_io_is_play(io);
  142. int ret;
  143. cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
  144. cfg.src_addr = dma->src_addr;
  145. cfg.dst_addr = dma->dst_addr;
  146. cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  147. cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  148. dev_dbg(dev, "%s[%d] %pad -> %pad\n",
  149. rsnd_mod_name(mod), rsnd_mod_id(mod),
  150. &cfg.src_addr, &cfg.dst_addr);
  151. ret = dmaengine_slave_config(dmaen->chan, &cfg);
  152. if (ret < 0)
  153. return ret;
  154. desc = dmaengine_prep_dma_cyclic(dmaen->chan,
  155. substream->runtime->dma_addr,
  156. snd_pcm_lib_buffer_bytes(substream),
  157. snd_pcm_lib_period_bytes(substream),
  158. is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
  159. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  160. if (!desc) {
  161. dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
  162. return -EIO;
  163. }
  164. desc->callback = rsnd_dmaen_complete;
  165. desc->callback_param = rsnd_mod_get(dma);
  166. dmaen->dma_len = snd_pcm_lib_buffer_bytes(substream);
  167. dmaen->cookie = dmaengine_submit(desc);
  168. if (dmaen->cookie < 0) {
  169. dev_err(dev, "dmaengine_submit() fail\n");
  170. return -EIO;
  171. }
  172. dma_async_issue_pending(dmaen->chan);
  173. return 0;
  174. }
  175. struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
  176. struct rsnd_mod *mod, char *name)
  177. {
  178. struct dma_chan *chan = NULL;
  179. struct device_node *np;
  180. int i = 0;
  181. for_each_child_of_node(of_node, np) {
  182. if (i == rsnd_mod_id(mod) && (!chan))
  183. chan = of_dma_request_slave_channel(np, name);
  184. i++;
  185. }
  186. /* It should call of_node_put(), since, it is rsnd_xxx_of_node() */
  187. of_node_put(of_node);
  188. return chan;
  189. }
/*
 * Probe-time attach for the DMAEngine backend.
 *
 * Requests the channel once to (a) verify DMA is usable for this path
 * and (b) record the DMAC device for buffer preallocation/IPMMU, then
 * releases it again; the channel is re-requested for real in
 * rsnd_dmaen_nolock_start().
 *
 * Returns 0 on success, -EPROBE_DEFER to retry probing later, or
 * -EAGAIN to fall back to PIO mode.
 */
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/* Let's follow when -EPROBE_DEFER case */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

		/*
		 * DMA failed. try to PIO mode
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	/*
	 * use it for IPMMU if needed
	 * see
	 *	rsnd_preallocate_pages()
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}
  221. static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
  222. struct rsnd_dai_stream *io,
  223. snd_pcm_uframes_t *pointer)
  224. {
  225. struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
  226. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  227. struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
  228. struct dma_tx_state state;
  229. enum dma_status status;
  230. unsigned int pos = 0;
  231. status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
  232. if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
  233. if (state.residue > 0 && state.residue <= dmaen->dma_len)
  234. pos = dmaen->dma_len - state.residue;
  235. }
  236. *pointer = bytes_to_frames(runtime, pos);
  237. return 0;
  238. }
  239. static struct rsnd_mod_ops rsnd_dmaen_ops = {
  240. .name = "audmac",
  241. .nolock_start = rsnd_dmaen_nolock_start,
  242. .nolock_stop = rsnd_dmaen_nolock_stop,
  243. .start = rsnd_dmaen_start,
  244. .stop = rsnd_dmaen_stop,
  245. .pointer= rsnd_dmaen_pointer,
  246. };
/*
 * Audio DMAC peri peri
 *
 * Module-index -> peri-peri request-id tables, indexed by
 * rsnd_mod_id() in rsnd_dmapp_get_id().
 */
static const u8 gen2_id_table_ssiu[] = {
	0x00, /* SSI00 */
	0x04, /* SSI10 */
	0x08, /* SSI20 */
	0x0c, /* SSI3  */
	0x0d, /* SSI4  */
	0x0e, /* SSI5  */
	0x0f, /* SSI6  */
	0x10, /* SSI7  */
	0x11, /* SSI8  */
	0x12, /* SSI90 */
};

static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};

static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};
  278. static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
  279. struct rsnd_mod *mod)
  280. {
  281. struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
  282. struct rsnd_mod *src = rsnd_io_to_mod_src(io);
  283. struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
  284. const u8 *entry = NULL;
  285. int id = rsnd_mod_id(mod);
  286. int size = 0;
  287. if (mod == ssi) {
  288. entry = gen2_id_table_ssiu;
  289. size = ARRAY_SIZE(gen2_id_table_ssiu);
  290. } else if (mod == src) {
  291. entry = gen2_id_table_scu;
  292. size = ARRAY_SIZE(gen2_id_table_scu);
  293. } else if (mod == dvc) {
  294. entry = gen2_id_table_cmd;
  295. size = ARRAY_SIZE(gen2_id_table_cmd);
  296. }
  297. if ((!entry) || (size <= id)) {
  298. struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));
  299. dev_err(dev, "unknown connection (%s[%d])\n",
  300. rsnd_mod_name(mod), rsnd_mod_id(mod));
  301. /* use non-prohibited SRS number as error */
  302. return 0x00; /* SSI00 */
  303. }
  304. return entry[id];
  305. }
  306. static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
  307. struct rsnd_mod *mod_from,
  308. struct rsnd_mod *mod_to)
  309. {
  310. return (rsnd_dmapp_get_id(io, mod_from) << 24) +
  311. (rsnd_dmapp_get_id(io, mod_to) << 16);
  312. }
/*
 * Register address for this peri-peri channel: the per-channel
 * register blocks start at base + 0x20 and are 0x10 bytes apart.
 */
#define rsnd_dmapp_addr(dmac, dma, reg) \
	(dmac->base + 0x20 + reg + \
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
  316. static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
  317. {
  318. struct rsnd_mod *mod = rsnd_mod_get(dma);
  319. struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
  320. struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
  321. struct device *dev = rsnd_priv_to_dev(priv);
  322. dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);
  323. iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
  324. }
  325. static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
  326. {
  327. struct rsnd_mod *mod = rsnd_mod_get(dma);
  328. struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
  329. struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
  330. return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
  331. }
  332. static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
  333. {
  334. struct rsnd_mod *mod = rsnd_mod_get(dma);
  335. struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
  336. struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
  337. void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
  338. u32 val = ioread32(addr);
  339. val &= ~mask;
  340. val |= (data & mask);
  341. iowrite32(val, addr);
  342. }
  343. static int rsnd_dmapp_stop(struct rsnd_mod *mod,
  344. struct rsnd_dai_stream *io,
  345. struct rsnd_priv *priv)
  346. {
  347. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  348. int i;
  349. rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
  350. for (i = 0; i < 1024; i++) {
  351. if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
  352. return 0;
  353. udelay(1);
  354. }
  355. return -EIO;
  356. }
  357. static int rsnd_dmapp_start(struct rsnd_mod *mod,
  358. struct rsnd_dai_stream *io,
  359. struct rsnd_priv *priv)
  360. {
  361. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  362. struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
  363. rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
  364. rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
  365. rsnd_dmapp_write(dma, dmapp->chcr, PDMACHCR);
  366. return 0;
  367. }
  368. static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
  369. struct rsnd_dma *dma,
  370. struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
  371. {
  372. struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
  373. struct rsnd_priv *priv = rsnd_io_to_priv(io);
  374. struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
  375. struct device *dev = rsnd_priv_to_dev(priv);
  376. dmapp->dmapp_id = dmac->dmapp_num;
  377. dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;
  378. dmac->dmapp_num++;
  379. dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
  380. dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);
  381. return 0;
  382. }
  383. static struct rsnd_mod_ops rsnd_dmapp_ops = {
  384. .name = "audmac-pp",
  385. .start = rsnd_dmapp_start,
  386. .stop = rsnd_dmapp_stop,
  387. .quit = rsnd_dmapp_stop,
  388. };
/*
 * Common DMAC Interface
 */

/*
 * DMA read/write register offset
 *
 *	RSND_xxx_I_N	for Audio DMAC input
 *	RSND_xxx_O_N	for Audio DMAC output
 *	RSND_xxx_I_P	for Audio DMAC peri peri input
 *	RSND_xxx_O_P	for Audio DMAC peri peri output
 *
 * ex) R-Car H2 case
 *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC pp out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000              0xec308000
 */
/* Each macro takes a register-base token ("ssi"/"src", expanded to
 * xxx_reg) and a module id, and yields the FIFO address for that id. */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))
#define RDMA_SSIU_O_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))

#define RDMA_SSIU_I_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))
#define RDMA_SSIU_O_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))
/*
 * Resolve the Gen2 DMA FIFO address for one end of a transfer.
 *
 * The table is indexed [is_ssi][is_play][use_src + use_cmd]:
 *	is_ssi:  0 = SRC module, 1 = SSI, 2 = SSIU (SSI via BUSIF)
 *	is_play: 0 = capture, 1 = playback
 *	3rd idx: 0 = SSI only, 1 = +SRC, 2 = +SRC+CMD(DVC/MIX/CTU)
 * is_from selects out_addr (transfer source) vs in_addr (destination).
 */
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	/* ssi_reg/src_reg are consumed by the RDMA_* macros via ## _reg */
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSI_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSIU_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } } },
	};

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}
  478. static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
  479. struct rsnd_mod *mod,
  480. int is_play, int is_from)
  481. {
  482. struct rsnd_priv *priv = rsnd_io_to_priv(io);
  483. /*
  484. * gen1 uses default DMA addr
  485. */
  486. if (rsnd_is_gen1(priv))
  487. return 0;
  488. if (!mod)
  489. return 0;
  490. return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
  491. }
#define MOD_MAX	(RSND_MOD_MAX + 1) /* +Memory */
/*
 * Work out which two modules this DMA sits between.
 *
 * Builds the ordered processing chain mem/SSI -> SRC -> CTU -> MIX
 * -> DVC -> SSI/mem (NULL entries stand for memory), then picks
 * either the first hop or the last hop of the chain depending on
 * whether "this" is on the SSI side for the stream direction.
 * Results are returned through *mod_from / *mod_to.
 */
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	/* no SSI means no DMA path at all */
	if (!ssi)
		return;

	/* count how many modules are connected to this stream */
	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*-> Audio DMAC
	 * -o-> Audio DMAC peri peri
	 */
	mod_start = (is_play) ? NULL : ssi;	/* NULL = memory endpoint */
	mod_end   = (is_play) ? ssi  : NULL;

	/* lay out the chain in fixed SRC -> CTU -> MIX -> DVC order */
	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 * Choose which hop of the chain "this" DMA serves:
	 *
	 *		| SSI | SRC |
	 * -------------+-----+-----+
	 *  is_play	|  o  |  *  |
	 * !is_play	|  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		/* last hop (next to SSI for playback) */
		*mod_from = mod[idx - 1];
		*mod_to   = mod[idx];
	} else {
		/* first hop */
		*mod_from = mod[0];
		*mod_to   = mod[1];
	}

	dev_dbg(dev, "module connection (this is %s[%d])\n",
		rsnd_mod_name(this), rsnd_mod_id(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, " %s[%d]%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			rsnd_mod_id  (mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to) ? " to" : "");
	}
}
/*
 * Allocate and initialize a DMA module for this stream.
 *
 * Picks the backend: module-to-module hops use the Audio DMAC
 * peri-peri ("audmac-pp"), mem<->module hops (or anything on Gen1)
 * use the DMAEngine-based Audio DMAC ("audmac"). The new module is
 * returned through *dma_mod.
 *
 * Returns 0 on success, -EAGAIN to fall back to PIO, or another
 * negative errno on failure.
 */
static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * DMA failed. try to PIO mode
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 or later */
	if (mod_from && mod_to) {
		/* module <-> module: Audio DMAC peri peri */
		ops	= &rsnd_dmapp_ops;
		attach	= rsnd_dmapp_attach;
		dma_id	= dmac->dmapp_num;
		type	= RSND_MOD_AUDMAPP;
	} else {
		/* memory <-> module: DMAEngine */
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite: peri-peri does not apply */
	if (rsnd_is_gen1(priv)) {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    rsnd_mod_get_status, type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
		rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_id  (mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to   ? mod_to   : &mem),
		rsnd_mod_id  (mod_to   ? mod_to   : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	/* resolve endpoint FIFO addresses for start() */
	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to   = mod_to;

	return 0;
}
  640. int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
  641. struct rsnd_mod **dma_mod)
  642. {
  643. if (!(*dma_mod)) {
  644. int ret = rsnd_dma_alloc(io, mod, dma_mod);
  645. if (ret < 0)
  646. return ret;
  647. }
  648. return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
  649. }
/*
 * Probe-time setup of the DMA controller state.
 *
 * On Gen1 there is nothing to map. On Gen2+ the "audmapp" register
 * region is mapped for the peri-peri backend; if either allocation or
 * the resource is missing, priv->dma stays NULL and the driver falls
 * back to PIO mode (rsnd_dma_alloc() returns -EAGAIN in that case).
 */
int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2 or later
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	/* non-NULL priv->dma is what enables DMA mode elsewhere */
	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, NULL, 0, 0);
}