fsl_asrc_dma.c

// SPDX-License-Identifier: GPL-2.0
//
// Freescale ASRC ALSA SoC Platform (DMA) driver
//
// Copyright (C) 2014 Freescale Semiconductor, Inc.
//
// Author: Nicolin Chen <nicoleotsuka@gmail.com>

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_data/dma-imx.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>

#include "fsl_asrc.h"

#define FSL_ASRC_DMABUF_SIZE	(256 * 1024)
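
/*
 * A rough picture of the data path handled by this platform driver, as
 * read from the code below: each ASRC pair drives two cyclic DMA
 * channels.  The "Front-End" channel moves audio between the ALSA DMA
 * buffer in memory and the ASRC input/output FIFO, while the "Back-End"
 * channel runs in DMA_DEV_TO_DEV mode between the ASRC FIFO and the
 * FIFO of whatever audio interface DPCM attaches as Back-End.
 */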

static const struct snd_pcm_hardware snd_imx_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.buffer_bytes_max = FSL_ASRC_DMABUF_SIZE,
	.period_bytes_min = 128,
	.period_bytes_max = 65535, /* Limited by SDMA engine */
	.periods_min = 2,
	.periods_max = 255,
	.fifo_size = 0,
};
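
/*
 * Filter callback for dma_request_channel() in hw_params() below: only
 * accept a general purpose i.MX DMA channel, and hand it the
 * imx_dma_data that carries the request lines of both ends of the
 * Back-End (DEV_TO_DEV) transfer.
 */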
static bool filter(struct dma_chan *chan, void *param)
{
	if (!imx_dma_is_general_purpose(chan))
		return false;

	chan->private = param;

	return true;
}
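
/*
 * Completion callback of the cyclic Front-End transfer: advance the
 * software pointer by one period, wrap it at the buffer size and tell
 * ALSA that a period has elapsed.
 */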
static void fsl_asrc_dma_complete(void *arg)
{
	struct snd_pcm_substream *substream = arg;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_asrc_pair *pair = runtime->private_data;

	pair->pos += snd_pcm_lib_period_bytes(substream);
	if (pair->pos >= snd_pcm_lib_buffer_bytes(substream))
		pair->pos = 0;

	snd_pcm_period_elapsed(substream);
}
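
/*
 * Indexing note: 'dir' is the ASRC side that faces the Back-End (OUT
 * for playback, IN for capture), so desc[!dir] is the memory-facing
 * Front-End descriptor and desc[dir] the device-to-device Back-End one.
 * The 0xffff/64/64 arguments of the Back-End descriptor appear to be
 * placeholders; the real FIFO addresses are set through the
 * dma_slave_config programmed in hw_params().
 */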
static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream)
{
	u8 dir = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? OUT : IN;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_asrc_pair *pair = runtime->private_data;
	struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
	struct device *dev = component->dev;
	unsigned long flags = DMA_CTRL_ACK;

	/* Prepare and submit Front-End DMA channel */
	if (!substream->runtime->no_period_wakeup)
		flags |= DMA_PREP_INTERRUPT;

	pair->pos = 0;
	pair->desc[!dir] = dmaengine_prep_dma_cyclic(
			pair->dma_chan[!dir], runtime->dma_addr,
			snd_pcm_lib_buffer_bytes(substream),
			snd_pcm_lib_period_bytes(substream),
			dir == OUT ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, flags);
	if (!pair->desc[!dir]) {
		dev_err(dev, "failed to prepare slave DMA for Front-End\n");
		return -ENOMEM;
	}

	pair->desc[!dir]->callback = fsl_asrc_dma_complete;
	pair->desc[!dir]->callback_param = substream;

	dmaengine_submit(pair->desc[!dir]);

	/* Prepare and submit Back-End DMA channel */
	pair->desc[dir] = dmaengine_prep_dma_cyclic(
			pair->dma_chan[dir], 0xffff, 64, 64, DMA_DEV_TO_DEV, 0);
	if (!pair->desc[dir]) {
		dev_err(dev, "failed to prepare slave DMA for Back-End\n");
		return -ENOMEM;
	}

	dmaengine_submit(pair->desc[dir]);

	return 0;
}

static int fsl_asrc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_asrc_pair *pair = runtime->private_data;
	int ret;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = fsl_asrc_dma_prepare_and_submit(substream);
		if (ret)
			return ret;
		dma_async_issue_pending(pair->dma_chan[IN]);
		dma_async_issue_pending(pair->dma_chan[OUT]);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		dmaengine_terminate_all(pair->dma_chan[OUT]);
		dmaengine_terminate_all(pair->dma_chan[IN]);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
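
/*
 * hw_params() does the heavy lifting, roughly in this order:
 *  1. walk the DPCM list to fetch the Back-End DAI's dma_data (its FIFO
 *     address and burst size);
 *  2. request and configure the Front-End channel between memory and
 *     the ASRC FIFO, reusing the generic dmaengine PCM slave config;
 *  3. temporarily request the Back-End DAI's channel and the ASRC's own
 *     channel just to read out their DMA request lines, release them,
 *     then request one general purpose channel (via filter()) and
 *     configure it for the DEV_TO_DEV transfer between the two FIFOs.
 */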
static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	struct snd_dmaengine_dai_dma_data *dma_params_fe = NULL;
	struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
	struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_asrc_pair *pair = runtime->private_data;
	struct fsl_asrc *asrc_priv = pair->asrc_priv;
	struct dma_slave_config config_fe, config_be;
	enum asrc_pair_index index = pair->index;
	struct device *dev = component->dev;
	int stream = substream->stream;
	struct imx_dma_data *tmp_data;
	struct snd_soc_dpcm *dpcm;
	struct dma_chan *tmp_chan;
	struct device *dev_be;
	u8 dir = tx ? OUT : IN;
	dma_cap_mask_t mask;
	int ret;

	/* Fetch the Back-End dma_data from DPCM */
	list_for_each_entry(dpcm, &rtd->dpcm[stream].be_clients, list_be) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_pcm_substream *substream_be;
		struct snd_soc_dai *dai = be->cpu_dai;

		if (dpcm->fe != rtd)
			continue;

		substream_be = snd_soc_dpcm_get_substream(be, stream);
		dma_params_be = snd_soc_dai_get_dma_data(dai, substream_be);
		dev_be = dai->dev;
		break;
	}

	if (!dma_params_be) {
		dev_err(dev, "failed to get the substream of Back-End\n");
		return -EINVAL;
	}

	/* Override dma_data of the Front-End and config its dmaengine */
	dma_params_fe = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	dma_params_fe->addr = asrc_priv->paddr + REG_ASRDx(!dir, index);
	dma_params_fe->maxburst = dma_params_be->maxburst;

	pair->dma_chan[!dir] = fsl_asrc_get_dma_channel(pair, !dir);
	if (!pair->dma_chan[!dir]) {
		dev_err(dev, "failed to request DMA channel\n");
		return -EINVAL;
	}

	memset(&config_fe, 0, sizeof(config_fe));
	ret = snd_dmaengine_pcm_prepare_slave_config(substream, params, &config_fe);
	if (ret) {
		dev_err(dev, "failed to prepare DMA config for Front-End\n");
		return ret;
	}

	ret = dmaengine_slave_config(pair->dma_chan[!dir], &config_fe);
	if (ret) {
		dev_err(dev, "failed to config DMA channel for Front-End\n");
		return ret;
	}

	/* Request and config DMA channel for Back-End */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* Get DMA request of Back-End */
	tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
	if (!tmp_chan) {
		/* Guard against a failed request before dereferencing it */
		dev_err(dev, "failed to request DMA channel of Back-End\n");
		return -EINVAL;
	}
	tmp_data = tmp_chan->private;
	pair->dma_data.dma_request = tmp_data->dma_request;
	dma_release_channel(tmp_chan);

	/* Get DMA request of Front-End */
	tmp_chan = fsl_asrc_get_dma_channel(pair, dir);
	tmp_data = tmp_chan->private;
	pair->dma_data.dma_request2 = tmp_data->dma_request;
	pair->dma_data.peripheral_type = tmp_data->peripheral_type;
	pair->dma_data.priority = tmp_data->priority;
	dma_release_channel(tmp_chan);

	pair->dma_chan[dir] = dma_request_channel(mask, filter, &pair->dma_data);
	if (!pair->dma_chan[dir]) {
		dev_err(dev, "failed to request DMA channel for Back-End\n");
		return -EINVAL;
	}

	if (asrc_priv->asrc_width == 16)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

	config_be.direction = DMA_DEV_TO_DEV;
	config_be.src_addr_width = buswidth;
	config_be.src_maxburst = dma_params_be->maxburst;
	config_be.dst_addr_width = buswidth;
	config_be.dst_maxburst = dma_params_be->maxburst;

	if (tx) {
		config_be.src_addr = asrc_priv->paddr + REG_ASRDO(index);
		config_be.dst_addr = dma_params_be->addr;
	} else {
		config_be.dst_addr = asrc_priv->paddr + REG_ASRDI(index);
		config_be.src_addr = dma_params_be->addr;
	}

	ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
	if (ret) {
		dev_err(dev, "failed to config DMA channel for Back-End\n");
		dma_release_channel(pair->dma_chan[dir]);
		return ret;
	}

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	return 0;
}

static int fsl_asrc_dma_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_asrc_pair *pair = runtime->private_data;

	snd_pcm_set_runtime_buffer(substream, NULL);

	if (pair->dma_chan[IN])
		dma_release_channel(pair->dma_chan[IN]);

	if (pair->dma_chan[OUT])
		dma_release_channel(pair->dma_chan[OUT]);

	pair->dma_chan[IN] = NULL;
	pair->dma_chan[OUT] = NULL;

	return 0;
}
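
/*
 * open/close pair: startup allocates the per-substream fsl_asrc_pair
 * state and applies the hardware constraints; shutdown drops the pair's
 * slot in asrc_priv (if it still owns it) and frees the state.
 */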
static int fsl_asrc_dma_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
	struct device *dev = component->dev;
	struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
	struct fsl_asrc_pair *pair;

	pair = kzalloc(sizeof(struct fsl_asrc_pair), GFP_KERNEL);
	if (!pair)
		return -ENOMEM;

	pair->asrc_priv = asrc_priv;

	runtime->private_data = pair;

	snd_pcm_hw_constraint_integer(substream->runtime,
				      SNDRV_PCM_HW_PARAM_PERIODS);
	snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);

	return 0;
}

static int fsl_asrc_dma_shutdown(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_asrc_pair *pair = runtime->private_data;
	struct fsl_asrc *asrc_priv;

	if (!pair)
		return 0;

	asrc_priv = pair->asrc_priv;

	if (asrc_priv->pair[pair->index] == pair)
		asrc_priv->pair[pair->index] = NULL;

	kfree(pair);

	return 0;
}

static snd_pcm_uframes_t fsl_asrc_dma_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_asrc_pair *pair = runtime->private_data;

	return bytes_to_frames(substream->runtime, pair->pos);
}

static const struct snd_pcm_ops fsl_asrc_dma_pcm_ops = {
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= fsl_asrc_dma_hw_params,
	.hw_free	= fsl_asrc_dma_hw_free,
	.trigger	= fsl_asrc_dma_trigger,
	.open		= fsl_asrc_dma_startup,
	.close		= fsl_asrc_dma_shutdown,
	.pointer	= fsl_asrc_dma_pcm_pointer,
};
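
/*
 * Preallocate one FSL_ASRC_DMABUF_SIZE buffer per substream at PCM
 * creation time; hw_params() later attaches it to the runtime via
 * snd_pcm_set_runtime_buffer().
 */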
static int fsl_asrc_dma_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm_substream *substream;
	struct snd_pcm *pcm = rtd->pcm;
	int ret, i;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(card->dev, "failed to set DMA mask\n");
		return ret;
	}

	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_LAST; i++) {
		substream = pcm->streams[i].substream;
		if (!substream)
			continue;

		ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
				FSL_ASRC_DMABUF_SIZE, &substream->dma_buffer);
		if (ret) {
			dev_err(card->dev, "failed to allocate DMA buffer\n");
			goto err;
		}
	}

	return 0;

err:
	if (--i == 0 && pcm->streams[i].substream)
		snd_dma_free_pages(&pcm->streams[i].substream->dma_buffer);

	return ret;
}

static void fsl_asrc_dma_pcm_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	int i;

	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_LAST; i++) {
		substream = pcm->streams[i].substream;
		if (!substream)
			continue;

		snd_dma_free_pages(&substream->dma_buffer);
		substream->dma_buffer.area = NULL;
		substream->dma_buffer.addr = 0;
	}
}

struct snd_soc_component_driver fsl_asrc_component = {
	.name		= DRV_NAME,
	.ops		= &fsl_asrc_dma_pcm_ops,
	.pcm_new	= fsl_asrc_dma_pcm_new,
	.pcm_free	= fsl_asrc_dma_pcm_free,
};
EXPORT_SYMBOL_GPL(fsl_asrc_component);
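
/*
 * The component itself is registered by the companion fsl_asrc platform
 * driver (fsl_asrc.c).  A minimal sketch of that call, assuming the
 * usual devm_snd_soc_register_component() helper and the DAI driver
 * exported by fsl_asrc.c, would be:
 *
 *	ret = devm_snd_soc_register_component(&pdev->dev,
 *					      &fsl_asrc_component,
 *					      &fsl_asrc_dai, 1);
 *
 * fsl_asrc_dai is not defined in this file.
 */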