/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};
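
/*
 * Copy "bytes" bytes from host memory to DSP I/O space one 32-bit word
 * at a time; any trailing 1-3 bytes are packed into a final 32-bit write.
 */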
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy() takes a count of 32-bit words, not bytes */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}
}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}
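
/* run a single DMA memcpy transfer and busy-wait for its completion */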
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}
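
/* register the on-chip DesignWare DMA controller with the dmaengine core */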
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}
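
/* only accept DMA channels that belong to this DSP's DMA controller */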
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}
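
/*
 * request a DMA channel from the DSP's DMA controller and configure it
 * for 32-bit memcpy transfers
 */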
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
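
/* probe and register the DMA engine attached to the ADSP IP, if any */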
static int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/*
	 * configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP.
	 */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end   = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}

static void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
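
/*
 * reload the firmware by re-running the core specific parser on the
 * cached DMA-able copy of the image
 */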
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);
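
/*
 * unload the firmware: free every module and runtime instance created
 * from this image, then release all scratch blocks
 */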
void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {
				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free against dma_dev, the device the buffer was allocated on */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);
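
/*
 * create a new runtime instance of a module and add it to the module's
 * runtime list
 */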
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);
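
/* find the free block matching the allocator's type and current offset */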
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* does the request span more than one block? */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module? */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does the section span more than one block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does the section span more than one block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);
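
/*
 * allocate DRAM blocks for a runtime's persistent data, either at a
 * caller supplied fixed offset or at the first free location
 */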
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* free the runtime's blocks, not the parent module's */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
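
/*
 * save a runtime's persistent memory into a newly allocated DMA-able
 * context buffer so it can be restored later
 */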
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);
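
/*
 * restore a runtime's persistent memory from the saved context buffer,
 * then free the buffer
 */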
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
				context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at a fixed offset? */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
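
/*
 * create and initialise a new DSP instance: run the core specific init,
 * register the ISR and set up the optional DMA engine
 */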
struct sst_dsp *sst_dsp_new(struct device *dev,
	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
{
	struct sst_dsp *sst;
	int err;

	dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->dma_dev = pdata->dma_dev;
	sst->thread_context = sst_dev->thread_context;
	sst->sst_dev = sst_dev;
	sst->id = pdata->id;
	sst->irq = pdata->irq;
	sst->ops = sst_dev->ops;
	sst->pdata = pdata;
	INIT_LIST_HEAD(&sst->used_block_list);
	INIT_LIST_HEAD(&sst->free_block_list);
	INIT_LIST_HEAD(&sst->module_list);
	INIT_LIST_HEAD(&sst->fw_list);
	INIT_LIST_HEAD(&sst->scratch_block_list);

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		err = sst->ops->init(sst, pdata);
		if (err < 0)
			return NULL;
	}

	/* Register the ISR */
	err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (err)
		goto irq_err;

	err = sst_dma_new(sst);
	if (err) {
		dev_err(dev, "sst_dma_new failed %d\n", err);
		goto dma_err;
	}

	return sst;

dma_err:
	free_irq(sst->irq, sst);
irq_err:
	if (sst->ops->free)
		sst->ops->free(sst);

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_new);
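
/* free a DSP instance: release the IRQ, core resources and DMA engine */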
void sst_dsp_free(struct sst_dsp *sst)
{
	free_irq(sst->irq, sst);
	if (sst->ops->free)
		sst->ops->free(sst);

	sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);

MODULE_DESCRIPTION("Intel SST Firmware Loader");
MODULE_LICENSE("GPL v2");