lpass-cpu.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
  4. *
  5. * lpass-cpu.c -- ALSA SoC CPU DAI driver for QTi LPASS
  6. */
  7. #include <dt-bindings/sound/qcom,lpass.h>
  8. #include <linux/clk.h>
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/of.h>
  12. #include <linux/platform_device.h>
  13. #include <sound/pcm.h>
  14. #include <sound/pcm_params.h>
  15. #include <linux/regmap.h>
  16. #include <sound/soc.h>
  17. #include <sound/soc-dai.h>
  18. #include "lpass-lpaif-reg.h"
  19. #include "lpass.h"
  20. #define LPASS_CPU_MAX_MI2S_LINES 4
  21. #define LPASS_CPU_I2S_SD0_MASK BIT(0)
  22. #define LPASS_CPU_I2S_SD1_MASK BIT(1)
  23. #define LPASS_CPU_I2S_SD2_MASK BIT(2)
  24. #define LPASS_CPU_I2S_SD3_MASK BIT(3)
  25. #define LPASS_CPU_I2S_SD0_1_MASK GENMASK(1, 0)
  26. #define LPASS_CPU_I2S_SD2_3_MASK GENMASK(3, 2)
  27. #define LPASS_CPU_I2S_SD0_1_2_MASK GENMASK(2, 0)
  28. #define LPASS_CPU_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
  29. #define LPASS_REG_READ 1
  30. #define LPASS_REG_WRITE 0
/*
 * Channel map exposed for quad-channel playback on MI2S Secondary.
 * Single 4-channel entry; the empty element terminates the list for
 * snd_pcm_add_chmap_ctls() (see lpass_cpu_daiops_pcm_new()).
 */
static struct snd_pcm_chmap_elem lpass_quad_chmaps[] = {
		{ .channels = 4,
		  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_RL,
				SNDRV_CHMAP_FR, SNDRV_CHMAP_RR } },
		{ }
};
  40. static int lpass_cpu_init_i2sctl_bitfields(struct device *dev,
  41. struct lpaif_i2sctl *i2sctl, struct regmap *map)
  42. {
  43. struct lpass_data *drvdata = dev_get_drvdata(dev);
  44. const struct lpass_variant *v = drvdata->variant;
  45. i2sctl->loopback = devm_regmap_field_alloc(dev, map, v->loopback);
  46. i2sctl->spken = devm_regmap_field_alloc(dev, map, v->spken);
  47. i2sctl->spkmode = devm_regmap_field_alloc(dev, map, v->spkmode);
  48. i2sctl->spkmono = devm_regmap_field_alloc(dev, map, v->spkmono);
  49. i2sctl->micen = devm_regmap_field_alloc(dev, map, v->micen);
  50. i2sctl->micmode = devm_regmap_field_alloc(dev, map, v->micmode);
  51. i2sctl->micmono = devm_regmap_field_alloc(dev, map, v->micmono);
  52. i2sctl->wssrc = devm_regmap_field_alloc(dev, map, v->wssrc);
  53. i2sctl->bitwidth = devm_regmap_field_alloc(dev, map, v->bitwidth);
  54. if (IS_ERR(i2sctl->loopback) || IS_ERR(i2sctl->spken) ||
  55. IS_ERR(i2sctl->spkmode) || IS_ERR(i2sctl->spkmono) ||
  56. IS_ERR(i2sctl->micen) || IS_ERR(i2sctl->micmode) ||
  57. IS_ERR(i2sctl->micmono) || IS_ERR(i2sctl->wssrc) ||
  58. IS_ERR(i2sctl->bitwidth))
  59. return -EINVAL;
  60. return 0;
  61. }
  62. static int lpass_cpu_daiops_set_sysclk(struct snd_soc_dai *dai, int clk_id,
  63. unsigned int freq, int dir)
  64. {
  65. struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
  66. int ret;
  67. ret = clk_set_rate(drvdata->mi2s_osr_clk[dai->driver->id], freq);
  68. if (ret)
  69. dev_err(dai->dev, "error setting mi2s osrclk to %u: %d\n",
  70. freq, ret);
  71. return ret;
  72. }
  73. static int lpass_cpu_daiops_startup(struct snd_pcm_substream *substream,
  74. struct snd_soc_dai *dai)
  75. {
  76. struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
  77. int ret;
  78. ret = clk_prepare_enable(drvdata->mi2s_osr_clk[dai->driver->id]);
  79. if (ret) {
  80. dev_err(dai->dev, "error in enabling mi2s osr clk: %d\n", ret);
  81. return ret;
  82. }
  83. ret = clk_prepare(drvdata->mi2s_bit_clk[dai->driver->id]);
  84. if (ret) {
  85. dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
  86. clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
  87. return ret;
  88. }
  89. return 0;
  90. }
  91. static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
  92. struct snd_soc_dai *dai)
  93. {
  94. struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
  95. struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
  96. unsigned int id = dai->driver->id;
  97. clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
  98. /*
  99. * Ensure LRCLK is disabled even in device node validation.
  100. * Will not impact if disabled in lpass_cpu_daiops_trigger()
  101. * suspend.
  102. */
  103. if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
  104. regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
  105. else
  106. regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
  107. /*
  108. * BCLK may not be enabled if lpass_cpu_daiops_prepare is called before
  109. * lpass_cpu_daiops_shutdown. It's paired with the clk_enable in
  110. * lpass_cpu_daiops_prepare.
  111. */
  112. if (drvdata->mi2s_was_prepared[dai->driver->id]) {
  113. drvdata->mi2s_was_prepared[dai->driver->id] = false;
  114. clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
  115. }
  116. clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
  117. }
/*
 * hw_params: program the I2S control register (bit width, SD-line mode,
 * mono/stereo) for the requested stream parameters and set the bit clock
 * rate to match.
 */
static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
	unsigned int id = dai->driver->id;
	snd_pcm_format_t format = params_format(params);
	unsigned int channels = params_channels(params);
	unsigned int rate = params_rate(params);
	unsigned int mode;
	unsigned int regval;
	int bitwidth, ret;

	bitwidth = snd_pcm_format_width(format);
	if (bitwidth < 0) {
		dev_err(dai->dev, "invalid bit width given: %d\n", bitwidth);
		return bitwidth;
	}

	/* Normal operation: loopback off, word-select generated internally */
	ret = regmap_fields_write(i2sctl->loopback, id,
				 LPAIF_I2SCTL_LOOPBACK_DISABLE);
	if (ret) {
		dev_err(dai->dev, "error updating loopback field: %d\n", ret);
		return ret;
	}

	ret = regmap_fields_write(i2sctl->wssrc, id,
				 LPAIF_I2SCTL_WSSRC_INTERNAL);
	if (ret) {
		dev_err(dai->dev, "error updating wssrc field: %d\n", ret);
		return ret;
	}

	/* Map PCM sample width to the hardware bit-width encoding */
	switch (bitwidth) {
	case 16:
		regval = LPAIF_I2SCTL_BITWIDTH_16;
		break;
	case 24:
		regval = LPAIF_I2SCTL_BITWIDTH_24;
		break;
	case 32:
		regval = LPAIF_I2SCTL_BITWIDTH_32;
		break;
	default:
		dev_err(dai->dev, "invalid bitwidth given: %d\n", bitwidth);
		return -EINVAL;
	}

	ret = regmap_fields_write(i2sctl->bitwidth, id, regval);
	if (ret) {
		dev_err(dai->dev, "error updating bitwidth field: %d\n", ret);
		return ret;
	}

	/* SD-line mode for this direction was picked up at probe time */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		mode = drvdata->mi2s_playback_sd_mode[id];
	else
		mode = drvdata->mi2s_capture_sd_mode[id];

	if (!mode) {
		dev_err(dai->dev, "no line is assigned\n");
		return -EINVAL;
	}

	/*
	 * Narrow the configured SD-line mode to the smallest one that can
	 * carry the requested channel count, and reject channel counts the
	 * configured lines cannot carry.
	 */
	switch (channels) {
	case 1:
	case 2:
		switch (mode) {
		case LPAIF_I2SCTL_MODE_QUAD01:
		case LPAIF_I2SCTL_MODE_6CH:
		case LPAIF_I2SCTL_MODE_8CH:
			mode = LPAIF_I2SCTL_MODE_SD0;
			break;
		case LPAIF_I2SCTL_MODE_QUAD23:
			mode = LPAIF_I2SCTL_MODE_SD2;
			break;
		}
		break;
	case 4:
		if (mode < LPAIF_I2SCTL_MODE_QUAD01) {
			dev_err(dai->dev, "cannot configure 4 channels with mode %d\n",
				mode);
			return -EINVAL;
		}
		switch (mode) {
		case LPAIF_I2SCTL_MODE_6CH:
		case LPAIF_I2SCTL_MODE_8CH:
			mode = LPAIF_I2SCTL_MODE_QUAD01;
			break;
		}
		break;
	case 6:
		if (mode < LPAIF_I2SCTL_MODE_6CH) {
			dev_err(dai->dev, "cannot configure 6 channels with mode %d\n",
				mode);
			return -EINVAL;
		}
		switch (mode) {
		case LPAIF_I2SCTL_MODE_8CH:
			mode = LPAIF_I2SCTL_MODE_6CH;
			break;
		}
		break;
	case 8:
		if (mode < LPAIF_I2SCTL_MODE_8CH) {
			dev_err(dai->dev, "cannot configure 8 channels with mode %d\n",
				mode);
			return -EINVAL;
		}
		break;
	default:
		dev_err(dai->dev, "invalid channels given: %u\n", channels);
		return -EINVAL;
	}

	/* Write the resolved mode and mono/stereo flag for this direction */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ret = regmap_fields_write(i2sctl->spkmode, id,
					 LPAIF_I2SCTL_SPKMODE(mode));
		if (ret) {
			dev_err(dai->dev, "error writing to i2sctl spkr mode: %d\n",
				ret);
			return ret;
		}
		if (channels >= 2)
			ret = regmap_fields_write(i2sctl->spkmono, id,
						 LPAIF_I2SCTL_SPKMONO_STEREO);
		else
			ret = regmap_fields_write(i2sctl->spkmono, id,
						 LPAIF_I2SCTL_SPKMONO_MONO);
	} else {
		ret = regmap_fields_write(i2sctl->micmode, id,
					 LPAIF_I2SCTL_MICMODE(mode));
		if (ret) {
			dev_err(dai->dev, "error writing to i2sctl mic mode: %d\n",
				ret);
			return ret;
		}
		if (channels >= 2)
			ret = regmap_fields_write(i2sctl->micmono, id,
						 LPAIF_I2SCTL_MICMONO_STEREO);
		else
			ret = regmap_fields_write(i2sctl->micmono, id,
						 LPAIF_I2SCTL_MICMONO_MONO);
	}

	if (ret) {
		dev_err(dai->dev, "error writing to i2sctl channels mode: %d\n",
			ret);
		return ret;
	}

	/* BCLK = sample rate * bit width * 2 (one slot per LRCLK half-frame) */
	ret = clk_set_rate(drvdata->mi2s_bit_clk[id],
			   rate * bitwidth * 2);
	if (ret) {
		dev_err(dai->dev, "error setting mi2s bitclk to %u: %d\n",
			rate * bitwidth * 2, ret);
		return ret;
	}

	return 0;
}
/*
 * trigger: toggle the speaker/mic enable bit and the BCLK enable count on
 * stream start/stop/suspend/resume. Only clk_enable/clk_disable are used
 * here — the clocks were already prepared in lpass_cpu_daiops_startup().
 */
static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
		int cmd, struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
	unsigned int id = dai->driver->id;
	int ret = -EINVAL;	/* commands outside the cases below return this */

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/*
		 * Ensure lpass BCLK/LRCLK is enabled during
		 * device resume as lpass_cpu_daiops_prepare() is not called
		 * after the device resumes. We don't check mi2s_was_prepared
		 * before enable/disable BCLK in trigger events because:
		 * 1. These trigger events are paired, so the BCLK
		 *    enable_count is balanced.
		 * 2. the BCLK can be shared (ex: headset and headset mic),
		 *    we need to increase the enable_count so that we don't
		 *    turn off the shared BCLK while other devices are using
		 *    it.
		 */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			ret = regmap_fields_write(i2sctl->spken, id,
						 LPAIF_I2SCTL_SPKEN_ENABLE);
		} else {
			ret = regmap_fields_write(i2sctl->micen, id,
						 LPAIF_I2SCTL_MICEN_ENABLE);
		}
		if (ret)
			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
				ret);

		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
		if (ret) {
			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
			/*
			 * NOTE(review): this drops the OSR enable taken in
			 * startup(), but shutdown() will disable OSR again —
			 * verify the enable count stays balanced on this
			 * error path.
			 */
			clk_disable(drvdata->mi2s_osr_clk[id]);
			return ret;
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/*
		 * To ensure lpass BCLK/LRCLK is disabled during
		 * device suspend.
		 */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			ret = regmap_fields_write(i2sctl->spken, id,
						 LPAIF_I2SCTL_SPKEN_DISABLE);
		} else {
			ret = regmap_fields_write(i2sctl->micen, id,
						 LPAIF_I2SCTL_MICEN_DISABLE);
		}
		if (ret)
			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
				ret);

		/* paired with the clk_enable in the start cases above */
		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
		break;
	}

	return ret;
}
  329. static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
  330. struct snd_soc_dai *dai)
  331. {
  332. struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
  333. struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
  334. unsigned int id = dai->driver->id;
  335. int ret;
  336. /*
  337. * Ensure lpass BCLK/LRCLK is enabled bit before playback/capture
  338. * data flow starts. This allows other codec to have some delay before
  339. * the data flow.
  340. * (ex: to drop start up pop noise before capture starts).
  341. */
  342. if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
  343. ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
  344. else
  345. ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
  346. if (ret) {
  347. dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
  348. return ret;
  349. }
  350. /*
  351. * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
  352. * be called multiple times. It's paired with the clk_disable in
  353. * lpass_cpu_daiops_shutdown.
  354. */
  355. if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
  356. ret = clk_enable(drvdata->mi2s_bit_clk[id]);
  357. if (ret) {
  358. dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
  359. return ret;
  360. }
  361. drvdata->mi2s_was_prepared[dai->driver->id] = true;
  362. }
  363. return 0;
  364. }
  365. static int lpass_cpu_daiops_pcm_new(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai)
  366. {
  367. int ret;
  368. struct snd_soc_dai_driver *drv = dai->driver;
  369. struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
  370. if (drvdata->mi2s_playback_sd_mode[dai->id] == LPAIF_I2SCTL_MODE_QUAD01) {
  371. ret = snd_pcm_add_chmap_ctls(rtd->pcm, SNDRV_PCM_STREAM_PLAYBACK,
  372. lpass_quad_chmaps, drv->playback.channels_max, 0,
  373. NULL);
  374. if (ret < 0)
  375. return ret;
  376. }
  377. return 0;
  378. }
  379. static int lpass_cpu_daiops_probe(struct snd_soc_dai *dai)
  380. {
  381. struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
  382. int ret;
  383. /* ensure audio hardware is disabled */
  384. ret = regmap_write(drvdata->lpaif_map,
  385. LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), 0);
  386. if (ret)
  387. dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
  388. return ret;
  389. }
/* Shared DAI ops for the LPASS MI2S CPU DAIs (no channel-map control). */
const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
	.probe = lpass_cpu_daiops_probe,
	.set_sysclk = lpass_cpu_daiops_set_sysclk,
	.startup = lpass_cpu_daiops_startup,
	.shutdown = lpass_cpu_daiops_shutdown,
	.hw_params = lpass_cpu_daiops_hw_params,
	.trigger = lpass_cpu_daiops_trigger,
	.prepare = lpass_cpu_daiops_prepare,
};
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
/*
 * Variant of the DAI ops that additionally registers a channel-map
 * control at PCM creation time via lpass_cpu_daiops_pcm_new().
 */
const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops2 = {
	.pcm_new = lpass_cpu_daiops_pcm_new,
	.probe = lpass_cpu_daiops_probe,
	.set_sysclk = lpass_cpu_daiops_set_sysclk,
	.startup = lpass_cpu_daiops_startup,
	.shutdown = lpass_cpu_daiops_shutdown,
	.hw_params = lpass_cpu_daiops_hw_params,
	.trigger = lpass_cpu_daiops_trigger,
	.prepare = lpass_cpu_daiops_prepare,
};
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops2);
  411. static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
  412. const struct of_phandle_args *args,
  413. const char **dai_name)
  414. {
  415. struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
  416. const struct lpass_variant *variant = drvdata->variant;
  417. int id = args->args[0];
  418. int ret = -EINVAL;
  419. int i;
  420. for (i = 0; i < variant->num_dai; i++) {
  421. if (variant->dai_driver[i].id == id) {
  422. *dai_name = variant->dai_driver[i].name;
  423. ret = 0;
  424. break;
  425. }
  426. }
  427. return ret;
  428. }
/* ASoC component driver; only provides DT DAI-name translation. */
static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
	.name = "lpass-cpu",
	.of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
	.legacy_dai_naming = 1,
};
  434. static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
  435. {
  436. struct lpass_data *drvdata = dev_get_drvdata(dev);
  437. const struct lpass_variant *v = drvdata->variant;
  438. int i;
  439. for (i = 0; i < v->i2s_ports; ++i)
  440. if (reg == LPAIF_I2SCTL_REG(v, i))
  441. return true;
  442. for (i = 0; i < v->irq_ports; ++i) {
  443. if (reg == LPAIF_IRQEN_REG(v, i))
  444. return true;
  445. if (reg == LPAIF_IRQCLEAR_REG(v, i))
  446. return true;
  447. }
  448. for (i = 0; i < v->rdma_channels; ++i) {
  449. if (reg == LPAIF_RDMACTL_REG(v, i))
  450. return true;
  451. if (reg == LPAIF_RDMABASE_REG(v, i))
  452. return true;
  453. if (reg == LPAIF_RDMABUFF_REG(v, i))
  454. return true;
  455. if (reg == LPAIF_RDMAPER_REG(v, i))
  456. return true;
  457. }
  458. for (i = 0; i < v->wrdma_channels; ++i) {
  459. if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
  460. return true;
  461. if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
  462. return true;
  463. if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
  464. return true;
  465. if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
  466. return true;
  467. }
  468. return false;
  469. }
  470. static bool lpass_cpu_regmap_readable(struct device *dev, unsigned int reg)
  471. {
  472. struct lpass_data *drvdata = dev_get_drvdata(dev);
  473. const struct lpass_variant *v = drvdata->variant;
  474. int i;
  475. for (i = 0; i < v->i2s_ports; ++i)
  476. if (reg == LPAIF_I2SCTL_REG(v, i))
  477. return true;
  478. for (i = 0; i < v->irq_ports; ++i) {
  479. if (reg == LPAIF_IRQCLEAR_REG(v, i))
  480. return true;
  481. if (reg == LPAIF_IRQEN_REG(v, i))
  482. return true;
  483. if (reg == LPAIF_IRQSTAT_REG(v, i))
  484. return true;
  485. }
  486. for (i = 0; i < v->rdma_channels; ++i) {
  487. if (reg == LPAIF_RDMACTL_REG(v, i))
  488. return true;
  489. if (reg == LPAIF_RDMABASE_REG(v, i))
  490. return true;
  491. if (reg == LPAIF_RDMABUFF_REG(v, i))
  492. return true;
  493. if (reg == LPAIF_RDMACURR_REG(v, i))
  494. return true;
  495. if (reg == LPAIF_RDMAPER_REG(v, i))
  496. return true;
  497. }
  498. for (i = 0; i < v->wrdma_channels; ++i) {
  499. if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
  500. return true;
  501. if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
  502. return true;
  503. if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
  504. return true;
  505. if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
  506. return true;
  507. if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
  508. return true;
  509. }
  510. return false;
  511. }
  512. static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
  513. {
  514. struct lpass_data *drvdata = dev_get_drvdata(dev);
  515. const struct lpass_variant *v = drvdata->variant;
  516. int i;
  517. for (i = 0; i < v->irq_ports; ++i) {
  518. if (reg == LPAIF_IRQCLEAR_REG(v, i))
  519. return true;
  520. if (reg == LPAIF_IRQSTAT_REG(v, i))
  521. return true;
  522. }
  523. for (i = 0; i < v->rdma_channels; ++i)
  524. if (reg == LPAIF_RDMACURR_REG(v, i))
  525. return true;
  526. for (i = 0; i < v->wrdma_channels; ++i)
  527. if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
  528. return true;
  529. return false;
  530. }
/* regmap description for the MI2S LPAIF block: 32-bit registers, 4-byte stride. */
static struct regmap_config lpass_cpu_regmap_config = {
	.name = "lpass_cpu",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.writeable_reg = lpass_cpu_regmap_writeable,
	.readable_reg = lpass_cpu_regmap_readable,
	.volatile_reg = lpass_cpu_regmap_volatile,
	/* flat cache; registers flagged volatile above bypass it */
	.cache_type = REGCACHE_FLAT,
};
/*
 * Allocate regmap fields for the HDMI TX control, legacy-enable, vbit,
 * parity, DP metadata, sstream and per-channel DMA registers, storing the
 * results in drvdata for later use.
 *
 * NOTE(review): QCOM_REGMAP_FIELD_ALLOC is a helper macro from lpass.h;
 * presumably it returns from this function on allocation failure — verify
 * against the macro definition.
 */
static int lpass_hdmi_init_bitfields(struct device *dev, struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	const struct lpass_variant *v = drvdata->variant;
	unsigned int i;
	struct lpass_hdmi_tx_ctl *tx_ctl;
	struct regmap_field *legacy_en;
	struct lpass_vbit_ctrl *vbit_ctl;
	struct regmap_field *tx_parity;
	struct lpass_dp_metadata_ctl *meta_ctl;
	struct lpass_sstream_ctl *sstream_ctl;
	struct regmap_field *ch_msb;
	struct regmap_field *ch_lsb;
	struct lpass_hdmitx_dmactl *tx_dmactl;
	int rval;

	tx_ctl = devm_kzalloc(dev, sizeof(*tx_ctl), GFP_KERNEL);
	if (!tx_ctl)
		return -ENOMEM;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->soft_reset, tx_ctl->soft_reset);
	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->force_reset, tx_ctl->force_reset);
	drvdata->tx_ctl = tx_ctl;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->legacy_en, legacy_en);
	drvdata->hdmitx_legacy_en = legacy_en;

	vbit_ctl = devm_kzalloc(dev, sizeof(*vbit_ctl), GFP_KERNEL);
	if (!vbit_ctl)
		return -ENOMEM;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->replace_vbit, vbit_ctl->replace_vbit);
	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->vbit_stream, vbit_ctl->vbit_stream);
	drvdata->vbit_ctl = vbit_ctl;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->calc_en, tx_parity);
	drvdata->hdmitx_parity_calc_en = tx_parity;

	meta_ctl = devm_kzalloc(dev, sizeof(*meta_ctl), GFP_KERNEL);
	if (!meta_ctl)
		return -ENOMEM;

	/* bulk-allocate 7 consecutive fields starting at mute */
	rval = devm_regmap_field_bulk_alloc(dev, map, &meta_ctl->mute, &v->mute, 7);
	if (rval)
		return rval;
	drvdata->meta_ctl = meta_ctl;

	sstream_ctl = devm_kzalloc(dev, sizeof(*sstream_ctl), GFP_KERNEL);
	if (!sstream_ctl)
		return -ENOMEM;

	/* bulk-allocate 9 consecutive fields starting at sstream_en */
	rval = devm_regmap_field_bulk_alloc(dev, map, &sstream_ctl->sstream_en, &v->sstream_en, 9);
	if (rval)
		return rval;
	drvdata->sstream_ctl = sstream_ctl;

	/* per-channel MSB/LSB fields plus one DMA control block each */
	for (i = 0; i < LPASS_MAX_HDMI_DMA_CHANNELS; i++) {
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->msb_bits, ch_msb);
		drvdata->hdmitx_ch_msb[i] = ch_msb;

		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->lsb_bits, ch_lsb);
		drvdata->hdmitx_ch_lsb[i] = ch_lsb;

		tx_dmactl = devm_kzalloc(dev, sizeof(*tx_dmactl), GFP_KERNEL);
		if (!tx_dmactl)
			return -ENOMEM;

		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_chs, tx_dmactl->use_hw_chs);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_usr, tx_dmactl->use_hw_usr);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_chs_sel, tx_dmactl->hw_chs_sel);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_usr_sel, tx_dmactl->hw_usr_sel);
		drvdata->hdmi_tx_dmactl[i] = tx_dmactl;
	}

	return 0;
}
  602. static bool lpass_hdmi_regmap_writeable(struct device *dev, unsigned int reg)
  603. {
  604. struct lpass_data *drvdata = dev_get_drvdata(dev);
  605. const struct lpass_variant *v = drvdata->variant;
  606. int i;
  607. if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
  608. return true;
  609. if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
  610. return true;
  611. if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
  612. return true;
  613. if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
  614. return true;
  615. if (reg == LPASS_HDMI_TX_DP_ADDR(v))
  616. return true;
  617. if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
  618. return true;
  619. if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
  620. return true;
  621. if (reg == LPASS_HDMITX_APP_IRQCLEAR_REG(v))
  622. return true;
  623. for (i = 0; i < v->hdmi_rdma_channels; i++) {
  624. if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
  625. return true;
  626. if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
  627. return true;
  628. if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
  629. return true;
  630. }
  631. for (i = 0; i < v->hdmi_rdma_channels; ++i) {
  632. if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
  633. return true;
  634. if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
  635. return true;
  636. if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
  637. return true;
  638. if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
  639. return true;
  640. }
  641. return false;
  642. }
  643. static bool lpass_hdmi_regmap_readable(struct device *dev, unsigned int reg)
  644. {
  645. struct lpass_data *drvdata = dev_get_drvdata(dev);
  646. const struct lpass_variant *v = drvdata->variant;
  647. int i;
  648. if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
  649. return true;
  650. if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
  651. return true;
  652. if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
  653. return true;
  654. for (i = 0; i < v->hdmi_rdma_channels; i++) {
  655. if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
  656. return true;
  657. if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
  658. return true;
  659. if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
  660. return true;
  661. }
  662. if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
  663. return true;
  664. if (reg == LPASS_HDMI_TX_DP_ADDR(v))
  665. return true;
  666. if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
  667. return true;
  668. if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
  669. return true;
  670. if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
  671. return true;
  672. for (i = 0; i < v->hdmi_rdma_channels; ++i) {
  673. if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
  674. return true;
  675. if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
  676. return true;
  677. if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
  678. return true;
  679. if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
  680. return true;
  681. if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
  682. return true;
  683. }
  684. return false;
  685. }
  686. static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
  687. {
  688. struct lpass_data *drvdata = dev_get_drvdata(dev);
  689. const struct lpass_variant *v = drvdata->variant;
  690. int i;
  691. if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
  692. return true;
  693. if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
  694. return true;
  695. if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
  696. return true;
  697. if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
  698. return true;
  699. for (i = 0; i < v->hdmi_rdma_channels; ++i) {
  700. if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
  701. return true;
  702. if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
  703. return true;
  704. if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
  705. return true;
  706. if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
  707. return true;
  708. }
  709. return false;
  710. }
/* regmap description for the HDMI TX block: 32-bit registers, 4-byte stride. */
static struct regmap_config lpass_hdmi_regmap_config = {
	.name = "lpass_hdmi",
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.writeable_reg = lpass_hdmi_regmap_writeable,
	.readable_reg = lpass_hdmi_regmap_readable,
	.volatile_reg = lpass_hdmi_regmap_volatile,
	/* flat cache; registers flagged volatile above bypass it */
	.cache_type = REGCACHE_FLAT,
};
  721. static bool __lpass_rxtx_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
  722. {
  723. struct lpass_data *drvdata = dev_get_drvdata(dev);
  724. const struct lpass_variant *v = drvdata->variant;
  725. int i;
  726. for (i = 0; i < v->rxtx_irq_ports; ++i) {
  727. if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
  728. return true;
  729. if (reg == LPAIF_RXTX_IRQEN_REG(v, i))
  730. return true;
  731. if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
  732. return true;
  733. }
  734. for (i = 0; i < v->rxtx_rdma_channels; ++i) {
  735. if (reg == LPAIF_CDC_RXTX_RDMACTL_REG(v, i, LPASS_CDC_DMA_RX0))
  736. return true;
  737. if (reg == LPAIF_CDC_RXTX_RDMABASE_REG(v, i, LPASS_CDC_DMA_RX0))
  738. return true;
  739. if (reg == LPAIF_CDC_RXTX_RDMABUFF_REG(v, i, LPASS_CDC_DMA_RX0))
  740. return true;
  741. if (rw == LPASS_REG_READ) {
  742. if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
  743. return true;
  744. }
  745. if (reg == LPAIF_CDC_RXTX_RDMAPER_REG(v, i, LPASS_CDC_DMA_RX0))
  746. return true;
  747. if (reg == LPAIF_CDC_RXTX_RDMA_INTF_REG(v, i, LPASS_CDC_DMA_RX0))
  748. return true;
  749. }
  750. for (i = 0; i < v->rxtx_wrdma_channels; ++i) {
  751. if (reg == LPAIF_CDC_RXTX_WRDMACTL_REG(v, i + v->rxtx_wrdma_channel_start,
  752. LPASS_CDC_DMA_TX3))
  753. return true;
  754. if (reg == LPAIF_CDC_RXTX_WRDMABASE_REG(v, i + v->rxtx_wrdma_channel_start,
  755. LPASS_CDC_DMA_TX3))
  756. return true;
  757. if (reg == LPAIF_CDC_RXTX_WRDMABUFF_REG(v, i + v->rxtx_wrdma_channel_start,
  758. LPASS_CDC_DMA_TX3))
  759. return true;
  760. if (rw == LPASS_REG_READ) {
  761. if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
  762. return true;
  763. }
  764. if (reg == LPAIF_CDC_RXTX_WRDMAPER_REG(v, i + v->rxtx_wrdma_channel_start,
  765. LPASS_CDC_DMA_TX3))
  766. return true;
  767. if (reg == LPAIF_CDC_RXTX_WRDMA_INTF_REG(v, i + v->rxtx_wrdma_channel_start,
  768. LPASS_CDC_DMA_TX3))
  769. return true;
  770. }
  771. return false;
  772. }
/* regmap .writeable_reg callback for the RX/TX CDC DMA block. */
static bool lpass_rxtx_regmap_writeable(struct device *dev, unsigned int reg)
{
	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_WRITE);
}
/* regmap .readable_reg callback for the RX/TX CDC DMA block. */
static bool lpass_rxtx_regmap_readable(struct device *dev, unsigned int reg)
{
	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_READ);
}
  781. static bool lpass_rxtx_regmap_volatile(struct device *dev, unsigned int reg)
  782. {
  783. struct lpass_data *drvdata = dev_get_drvdata(dev);
  784. const struct lpass_variant *v = drvdata->variant;
  785. int i;
  786. for (i = 0; i < v->rxtx_irq_ports; ++i) {
  787. if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
  788. return true;
  789. if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
  790. return true;
  791. }
  792. for (i = 0; i < v->rxtx_rdma_channels; ++i)
  793. if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
  794. return true;
  795. for (i = 0; i < v->rxtx_wrdma_channels; ++i)
  796. if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
  797. LPASS_CDC_DMA_TX3))
  798. return true;
  799. return false;
  800. }
  801. static bool __lpass_va_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
  802. {
  803. struct lpass_data *drvdata = dev_get_drvdata(dev);
  804. const struct lpass_variant *v = drvdata->variant;
  805. int i;
  806. for (i = 0; i < v->va_irq_ports; ++i) {
  807. if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
  808. return true;
  809. if (reg == LPAIF_VA_IRQEN_REG(v, i))
  810. return true;
  811. if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
  812. return true;
  813. }
  814. for (i = 0; i < v->va_wrdma_channels; ++i) {
  815. if (reg == LPAIF_CDC_VA_WRDMACTL_REG(v, i + v->va_wrdma_channel_start,
  816. LPASS_CDC_DMA_VA_TX0))
  817. return true;
  818. if (reg == LPAIF_CDC_VA_WRDMABASE_REG(v, i + v->va_wrdma_channel_start,
  819. LPASS_CDC_DMA_VA_TX0))
  820. return true;
  821. if (reg == LPAIF_CDC_VA_WRDMABUFF_REG(v, i + v->va_wrdma_channel_start,
  822. LPASS_CDC_DMA_VA_TX0))
  823. return true;
  824. if (rw == LPASS_REG_READ) {
  825. if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
  826. LPASS_CDC_DMA_VA_TX0))
  827. return true;
  828. }
  829. if (reg == LPAIF_CDC_VA_WRDMAPER_REG(v, i + v->va_wrdma_channel_start,
  830. LPASS_CDC_DMA_VA_TX0))
  831. return true;
  832. if (reg == LPAIF_CDC_VA_WRDMA_INTF_REG(v, i + v->va_wrdma_channel_start,
  833. LPASS_CDC_DMA_VA_TX0))
  834. return true;
  835. }
  836. return false;
  837. }
/* regmap .writeable_reg callback for the VA CDC DMA block. */
static bool lpass_va_regmap_writeable(struct device *dev, unsigned int reg)
{
	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_WRITE);
}
/* regmap .readable_reg callback for the VA CDC DMA block. */
static bool lpass_va_regmap_readable(struct device *dev, unsigned int reg)
{
	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_READ);
}
  846. static bool lpass_va_regmap_volatile(struct device *dev, unsigned int reg)
  847. {
  848. struct lpass_data *drvdata = dev_get_drvdata(dev);
  849. const struct lpass_variant *v = drvdata->variant;
  850. int i;
  851. for (i = 0; i < v->va_irq_ports; ++i) {
  852. if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
  853. return true;
  854. if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
  855. return true;
  856. }
  857. for (i = 0; i < v->va_wrdma_channels; ++i) {
  858. if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
  859. LPASS_CDC_DMA_VA_TX0))
  860. return true;
  861. }
  862. return false;
  863. }
  864. static struct regmap_config lpass_rxtx_regmap_config = {
  865. .reg_bits = 32,
  866. .reg_stride = 4,
  867. .val_bits = 32,
  868. .writeable_reg = lpass_rxtx_regmap_writeable,
  869. .readable_reg = lpass_rxtx_regmap_readable,
  870. .volatile_reg = lpass_rxtx_regmap_volatile,
  871. .cache_type = REGCACHE_FLAT,
  872. };
  873. static struct regmap_config lpass_va_regmap_config = {
  874. .reg_bits = 32,
  875. .reg_stride = 4,
  876. .val_bits = 32,
  877. .writeable_reg = lpass_va_regmap_writeable,
  878. .readable_reg = lpass_va_regmap_readable,
  879. .volatile_reg = lpass_va_regmap_volatile,
  880. .cache_type = REGCACHE_FLAT,
  881. };
  882. static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
  883. struct device_node *node,
  884. const char *name)
  885. {
  886. unsigned int lines[LPASS_CPU_MAX_MI2S_LINES];
  887. unsigned int sd_line_mask = 0;
  888. int num_lines, i;
  889. num_lines = of_property_read_variable_u32_array(node, name, lines, 0,
  890. LPASS_CPU_MAX_MI2S_LINES);
  891. if (num_lines < 0)
  892. return LPAIF_I2SCTL_MODE_NONE;
  893. for (i = 0; i < num_lines; i++)
  894. sd_line_mask |= BIT(lines[i]);
  895. switch (sd_line_mask) {
  896. case LPASS_CPU_I2S_SD0_MASK:
  897. return LPAIF_I2SCTL_MODE_SD0;
  898. case LPASS_CPU_I2S_SD1_MASK:
  899. return LPAIF_I2SCTL_MODE_SD1;
  900. case LPASS_CPU_I2S_SD2_MASK:
  901. return LPAIF_I2SCTL_MODE_SD2;
  902. case LPASS_CPU_I2S_SD3_MASK:
  903. return LPAIF_I2SCTL_MODE_SD3;
  904. case LPASS_CPU_I2S_SD0_1_MASK:
  905. return LPAIF_I2SCTL_MODE_QUAD01;
  906. case LPASS_CPU_I2S_SD2_3_MASK:
  907. return LPAIF_I2SCTL_MODE_QUAD23;
  908. case LPASS_CPU_I2S_SD0_1_2_MASK:
  909. return LPAIF_I2SCTL_MODE_6CH;
  910. case LPASS_CPU_I2S_SD0_1_2_3_MASK:
  911. return LPAIF_I2SCTL_MODE_8CH;
  912. default:
  913. dev_err(dev, "Unsupported SD line mask: %#x\n", sd_line_mask);
  914. return LPAIF_I2SCTL_MODE_NONE;
  915. }
  916. }
  917. static void of_lpass_cpu_parse_dai_data(struct device *dev,
  918. struct lpass_data *data)
  919. {
  920. struct device_node *node;
  921. int ret, i, id;
  922. /* Allow all channels by default for backwards compatibility */
  923. for (i = 0; i < data->variant->num_dai; i++) {
  924. id = data->variant->dai_driver[i].id;
  925. data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
  926. data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
  927. }
  928. for_each_child_of_node(dev->of_node, node) {
  929. ret = of_property_read_u32(node, "reg", &id);
  930. if (ret || id < 0) {
  931. dev_err(dev, "valid dai id not found: %d\n", ret);
  932. continue;
  933. }
  934. if (id == LPASS_DP_RX) {
  935. data->hdmi_port_enable = 1;
  936. } else if (is_cdc_dma_port(id)) {
  937. data->codec_dma_enable = 1;
  938. } else {
  939. data->mi2s_playback_sd_mode[id] =
  940. of_lpass_cpu_parse_sd_lines(dev, node,
  941. "qcom,playback-sd-lines");
  942. data->mi2s_capture_sd_mode[id] =
  943. of_lpass_cpu_parse_sd_lines(dev, node,
  944. "qcom,capture-sd-lines");
  945. }
  946. }
  947. }
  948. static int of_lpass_cdc_dma_clks_parse(struct device *dev,
  949. struct lpass_data *data)
  950. {
  951. data->codec_mem0 = devm_clk_get(dev, "audio_cc_codec_mem0");
  952. if (IS_ERR(data->codec_mem0))
  953. return PTR_ERR(data->codec_mem0);
  954. data->codec_mem1 = devm_clk_get(dev, "audio_cc_codec_mem1");
  955. if (IS_ERR(data->codec_mem1))
  956. return PTR_ERR(data->codec_mem1);
  957. data->codec_mem2 = devm_clk_get(dev, "audio_cc_codec_mem2");
  958. if (IS_ERR(data->codec_mem2))
  959. return PTR_ERR(data->codec_mem2);
  960. data->va_mem0 = devm_clk_get(dev, "aon_cc_va_mem0");
  961. if (IS_ERR(data->va_mem0))
  962. return PTR_ERR(data->va_mem0);
  963. return 0;
  964. }
/*
 * Probe the LPASS CPU DAI platform device: map the LPAIF register
 * blocks (plus optional codec-DMA and HDMI blocks), create their
 * regmaps, fetch the per-DAI clocks, and register the ASoC component
 * and platform driver.
 *
 * Returns 0 on success or a negative errno.  All resources are
 * devm-managed, so error paths simply return.
 */
int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
{
	struct lpass_data *drvdata;
	struct device_node *dsp_of_node;
	struct resource *res;
	const struct lpass_variant *variant;
	struct device *dev = &pdev->dev;
	int ret, i, dai_id;

	/* If a DSP owns the audio hardware, this driver must not touch it. */
	dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
	if (dsp_of_node) {
		dev_err(dev, "DSP exists and holds audio resources\n");
		of_node_put(dsp_of_node);
		return -EBUSY;
	}

	drvdata = devm_kzalloc(dev, sizeof(struct lpass_data), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;
	platform_set_drvdata(pdev, drvdata);

	variant = device_get_match_data(dev);
	if (!variant)
		return -EINVAL;

	if (of_device_is_compatible(dev->of_node, "qcom,lpass-cpu-apq8016"))
		dev_warn(dev, "qcom,lpass-cpu-apq8016 compatible is deprecated\n");

	drvdata->variant = variant;

	/* Sets codec_dma_enable / hdmi_port_enable and the SD line modes. */
	of_lpass_cpu_parse_dai_data(dev, drvdata);

	if (drvdata->codec_dma_enable) {
		drvdata->rxtx_lpaif =
			devm_platform_ioremap_resource_byname(pdev, "lpass-rxtx-lpaif");
		if (IS_ERR(drvdata->rxtx_lpaif))
			return PTR_ERR(drvdata->rxtx_lpaif);
		drvdata->va_lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-va-lpaif");
		if (IS_ERR(drvdata->va_lpaif))
			return PTR_ERR(drvdata->va_lpaif);

		/*
		 * max_register depends on the variant's channel layout, so
		 * the (file-scope, non-const) config is patched before the
		 * regmap is created.
		 */
		lpass_rxtx_regmap_config.max_register = LPAIF_CDC_RXTX_WRDMAPER_REG(variant,
					variant->rxtx_wrdma_channels +
					variant->rxtx_wrdma_channel_start, LPASS_CDC_DMA_TX3);

		drvdata->rxtx_lpaif_map = devm_regmap_init_mmio(dev, drvdata->rxtx_lpaif,
					&lpass_rxtx_regmap_config);
		if (IS_ERR(drvdata->rxtx_lpaif_map))
			return PTR_ERR(drvdata->rxtx_lpaif_map);

		lpass_va_regmap_config.max_register = LPAIF_CDC_VA_WRDMAPER_REG(variant,
					variant->va_wrdma_channels +
					variant->va_wrdma_channel_start, LPASS_CDC_DMA_VA_TX0);

		drvdata->va_lpaif_map = devm_regmap_init_mmio(dev, drvdata->va_lpaif,
					&lpass_va_regmap_config);
		if (IS_ERR(drvdata->va_lpaif_map))
			return PTR_ERR(drvdata->va_lpaif_map);

		ret = of_lpass_cdc_dma_clks_parse(dev, drvdata);
		if (ret) {
			dev_err(dev, "failed to get cdc dma clocks %d\n", ret);
			return ret;
		}

		/* Only the physical addresses of the LPM buffers are kept. */
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-rxtx-cdc-dma-lpm");
		if (!res)
			return -EINVAL;
		drvdata->rxtx_cdc_dma_lpm_buf = res->start;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-va-cdc-dma-lpm");
		if (!res)
			return -EINVAL;
		drvdata->va_cdc_dma_lpm_buf = res->start;
	}

	drvdata->lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-lpaif");
	if (IS_ERR(drvdata->lpaif))
		return PTR_ERR(drvdata->lpaif);

	lpass_cpu_regmap_config.max_register = LPAIF_WRDMAPER_REG(variant,
						variant->wrdma_channels +
						variant->wrdma_channel_start);

	drvdata->lpaif_map = devm_regmap_init_mmio(dev, drvdata->lpaif,
			&lpass_cpu_regmap_config);
	if (IS_ERR(drvdata->lpaif_map)) {
		dev_err(dev, "error initializing regmap: %ld\n",
			PTR_ERR(drvdata->lpaif_map));
		return PTR_ERR(drvdata->lpaif_map);
	}

	if (drvdata->hdmi_port_enable) {
		drvdata->hdmiif = devm_platform_ioremap_resource_byname(pdev, "lpass-hdmiif");
		if (IS_ERR(drvdata->hdmiif))
			return PTR_ERR(drvdata->hdmiif);

		lpass_hdmi_regmap_config.max_register = LPAIF_HDMI_RDMAPER_REG(variant,
					variant->hdmi_rdma_channels - 1);
		drvdata->hdmiif_map = devm_regmap_init_mmio(dev, drvdata->hdmiif,
					&lpass_hdmi_regmap_config);
		if (IS_ERR(drvdata->hdmiif_map)) {
			dev_err(dev, "error initializing regmap: %ld\n",
				PTR_ERR(drvdata->hdmiif_map));
			return PTR_ERR(drvdata->hdmiif_map);
		}
	}

	/* Variant-specific setup hook (clock/IP init), if provided. */
	if (variant->init) {
		ret = variant->init(pdev);
		if (ret) {
			dev_err(dev, "error initializing variant: %d\n", ret);
			return ret;
		}
	}

	for (i = 0; i < variant->num_dai; i++) {
		dai_id = variant->dai_driver[i].id;
		/* HDMI and codec DMA ports have no MI2S clocks. */
		if (dai_id == LPASS_DP_RX || is_cdc_dma_port(dai_id))
			continue;

		/* OSR clock is optional; bit clock is mandatory. */
		drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
					variant->dai_osr_clk_names[i]);
		drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
					variant->dai_bit_clk_names[i]);
		if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
			dev_err(dev,
				"error getting %s: %ld\n",
				variant->dai_bit_clk_names[i],
				PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
			return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
		}
		/*
		 * NOTE(review): dai_driver is indexed by dai_id here but by
		 * i everywhere else in this loop; this is only safe if DAI
		 * ids always equal their array index — verify against the
		 * variant tables.
		 */
		if (drvdata->mi2s_playback_sd_mode[dai_id] ==
		    LPAIF_I2SCTL_MODE_QUAD01) {
			variant->dai_driver[dai_id].playback.channels_min = 4;
			variant->dai_driver[dai_id].playback.channels_max = 4;
		}
	}

	/* Allocation for i2sctl regmap fields */
	drvdata->i2sctl = devm_kzalloc(&pdev->dev, sizeof(struct lpaif_i2sctl),
					GFP_KERNEL);
	if (!drvdata->i2sctl)
		return -ENOMEM;

	/* Initialize bitfields for dai I2SCTL register */
	ret = lpass_cpu_init_i2sctl_bitfields(dev, drvdata->i2sctl,
						drvdata->lpaif_map);
	if (ret) {
		dev_err(dev, "error init i2sctl field: %d\n", ret);
		return ret;
	}

	if (drvdata->hdmi_port_enable) {
		ret = lpass_hdmi_init_bitfields(dev, drvdata->hdmiif_map);
		if (ret) {
			dev_err(dev, "%s error hdmi init failed\n", __func__);
			return ret;
		}
	}

	ret = devm_snd_soc_register_component(dev,
					      &lpass_cpu_comp_driver,
					      variant->dai_driver,
					      variant->num_dai);
	if (ret) {
		dev_err(dev, "error registering cpu driver: %d\n", ret);
		goto err;
	}

	ret = asoc_qcom_lpass_platform_register(pdev);
	if (ret) {
		dev_err(dev, "error registering platform driver: %d\n", ret);
		goto err;
	}

	/* Success path also reaches here with ret == 0. */
err:
	return ret;
}
  1116. EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_probe);
  1117. void asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
  1118. {
  1119. struct lpass_data *drvdata = platform_get_drvdata(pdev);
  1120. if (drvdata->variant->exit)
  1121. drvdata->variant->exit(pdev);
  1122. }
  1123. EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);
  1124. void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev)
  1125. {
  1126. struct lpass_data *drvdata = platform_get_drvdata(pdev);
  1127. if (drvdata->variant->exit)
  1128. drvdata->variant->exit(pdev);
  1129. }
  1130. EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_shutdown);
  1131. MODULE_DESCRIPTION("QTi LPASS CPU Driver");
  1132. MODULE_LICENSE("GPL");