// SPDX-License-Identifier: GPL-2.0
/*
 * mtk-afe-fe-dais.c  --  Mediatek afe fe dai operator
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <garlic.tseng@mediatek.com>
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include "mtk-afe-platform-driver.h"
#include "mtk-afe-fe-dai.h"
#include "mtk-base-afe.h"

#define AFE_BASE_END_OFFSET 8

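/*
 * Register helpers: some memif/irq register fields are optional and are
 * described with a negative offset when a given SoC does not provide them.
 * These wrappers silently skip such registers instead of issuing an access.
 */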
static int mtk_regmap_update_bits(struct regmap *map, int reg,
				  unsigned int mask,
				  unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_update_bits(map, reg, mask, val);
}

static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_write(map, reg, val);
}

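/*
 * startup: bind the substream to its memif, enable the memif agent, apply the
 * platform hardware constraints and acquire a dynamically allocated AFE IRQ
 * if the memif does not already own one.
 */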
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int memif_num = rtd->cpu_dai->id;
	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
	int ret;

	memif->substream = substream;

	snd_pcm_hw_constraint_step(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);

	/* enable agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       0 << memif->data->agent_disable_shift);

	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);

	/*
	 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
	 * smaller than period_size due to AFE's internal buffer.
	 * This easily leads to overrun when avail_min is period_size.
	 * One more period can hold the possible unread buffer.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		int periods_max = mtk_afe_hardware->periods_max;

		ret = snd_pcm_hw_constraint_minmax(runtime,
						   SNDRV_PCM_HW_PARAM_PERIODS,
						   3, periods_max);
		if (ret < 0) {
			dev_err(afe->dev, "hw_constraint_minmax failed\n");
			return ret;
		}
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");

	/* dynamic allocate irq to memif */
	if (memif->irq_usage < 0) {
		int irq_id = mtk_dynamic_irq_acquire(afe);

		if (irq_id != afe->irqs_size) {
			/* link */
			memif->irq_usage = irq_id;
		} else {
			dev_err(afe->dev, "%s() error: no more asys irq\n",
				__func__);
			ret = -EBUSY;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);

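/*
 * shutdown: disable the memif agent and, for dynamically allocated IRQs,
 * return the IRQ to the pool and detach the substream from the memif.
 */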
void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int irq_id;

	irq_id = memif->irq_usage;

	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1 << memif->data->agent_disable_shift,
			       1 << memif->data->agent_disable_shift);

	if (!memif->const_irq) {
		mtk_dynamic_irq_release(afe, irq_id);
		memif->irq_usage = -1;
		memif->substream = NULL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);

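/*
 * hw_params: allocate the DMA buffer and program the memif with the buffer
 * base/end addresses, the 33rd address bit, mono/stereo mode and sample rate.
 */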
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int msb_at_bit33 = 0;
	int ret, fs = 0;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
	memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
	memif->buffer_size = substream->runtime->dma_bytes;

	/* start */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 memif->phys_buf_addr);
	/* end */
	mtk_regmap_write(afe->regmap,
			 memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
			 memif->phys_buf_addr + memif->buffer_size - 1);

	/* set MSB to 33-bit */
	mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
			       1 << memif->data->msb_shift,
			       msb_at_bit33 << memif->data->msb_shift);

	/* set channel */
	if (memif->data->mono_shift >= 0) {
		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				       1 << memif->data->mono_shift,
				       mono << memif->data->mono_shift);
	}

	/* set rate */
	if (memif->data->fs_shift < 0)
		return 0;

	fs = afe->memif_fs(substream, params_rate(params));
	if (fs < 0)
		return -EINVAL;

	mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
			       memif->data->fs_maskbit << memif->data->fs_shift,
			       fs << memif->data->fs_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);

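/* hw_free: release the DMA pages allocated in hw_params */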
int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	return snd_pcm_lib_free_pages(substream);
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);

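/*
 * trigger: on START/RESUME enable the memif, program the IRQ period counter
 * and IRQ rate, then unmask the IRQ; on STOP/SUSPEND disable the memif, mask
 * the IRQ and clear any pending status.
 */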
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
	unsigned int counter = runtime->period_size;
	int fs;

	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (memif->data->enable_shift >= 0)
			mtk_regmap_update_bits(afe->regmap,
					       memif->data->enable_reg,
					       1 << memif->data->enable_shift,
					       1 << memif->data->enable_shift);

		/* set irq counter */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
				       irq_data->irq_cnt_maskbit
				       << irq_data->irq_cnt_shift,
				       counter << irq_data->irq_cnt_shift);

		/* set irq fs */
		fs = afe->irq_fs(substream, runtime->rate);
		if (fs < 0)
			return -EINVAL;

		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
				       irq_data->irq_fs_maskbit
				       << irq_data->irq_fs_shift,
				       fs << irq_data->irq_fs_shift);

		/* enable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       1 << irq_data->irq_en_shift);

		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				       1 << memif->data->enable_shift, 0);

		/* disable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1 << irq_data->irq_en_shift,
				       0 << irq_data->irq_en_shift);

		/* and clear pending IRQ */
		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
				 1 << irq_data->irq_clr_shift);
		return 0;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);

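/*
 * prepare: select normal (16-bit) or HD (24/32-bit) memif addressing based on
 * the negotiated sample format.
 */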
int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int hd_audio = 0;

	/* set hd mode */
	switch (substream->runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		hd_audio = 1;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, substream->runtime->format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       1 << memif->data->hd_shift,
			       hd_audio << memif->data->hd_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);

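/* DAI ops shared by MediaTek AFE front-end DAI drivers */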
const struct snd_soc_dai_ops mtk_afe_fe_ops = {
	.startup	= mtk_afe_fe_startup,
	.shutdown	= mtk_afe_fe_shutdown,
	.hw_params	= mtk_afe_fe_hw_params,
	.hw_free	= mtk_afe_fe_hw_free,
	.prepare	= mtk_afe_fe_prepare,
	.trigger	= mtk_afe_fe_trigger,
};
EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);

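/*
 * Dynamic IRQ allocation: a memif without a fixed IRQ grabs the first free
 * entry in afe->irqs under irq_alloc_lock. mtk_dynamic_irq_acquire() returns
 * afe->irqs_size when no IRQ is available.
 */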
static DEFINE_MUTEX(irqs_lock);
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
{
	int i;

	mutex_lock(&afe->irq_alloc_lock);
	for (i = 0; i < afe->irqs_size; ++i) {
		if (afe->irqs[i].irq_occupyed == 0) {
			afe->irqs[i].irq_occupyed = 1;
			mutex_unlock(&afe->irq_alloc_lock);
			return i;
		}
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return afe->irqs_size;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);

int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
{
	mutex_lock(&afe->irq_alloc_lock);
	if (irq_id >= 0 && irq_id < afe->irqs_size) {
		afe->irqs[irq_id].irq_occupyed = 0;
		mutex_unlock(&afe->irq_alloc_lock);
		return 0;
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);

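/*
 * System suspend/resume: back up the SoC-specific register list before
 * powering the AFE down and restore it after powering back up. Nothing is
 * done when the device is already runtime-suspended.
 */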
int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || afe->suspended)
		return 0;

	if (!afe->reg_back_up)
		afe->reg_back_up =
			devm_kcalloc(dev, afe->reg_back_up_list_num,
				     sizeof(unsigned int), GFP_KERNEL);

	/* only back up registers if the backup buffer was allocated */
	if (afe->reg_back_up) {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			regmap_read(regmap, afe->reg_back_up_list[i],
				    &afe->reg_back_up[i]);
	}

	afe->suspended = true;
	afe->runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);

int mtk_afe_dai_resume(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i = 0;

	if (pm_runtime_status_suspended(dev) || !afe->suspended)
		return 0;

	afe->runtime_resume(dev);

	/* skip the restore when no backup buffer exists */
	if (!afe->reg_back_up) {
		dev_dbg(dev, "%s no reg_backup\n", __func__);
	} else {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			mtk_regmap_write(regmap, afe->reg_back_up_list[i],
					 afe->reg_back_up[i]);
	}

	afe->suspended = false;
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);

MODULE_DESCRIPTION("Mediatek simple fe dai operator");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");