/*
 * skl.c - Implementation of ASoC Intel SKL HD Audio driver
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 *
 * Derived mostly from Intel HDA driver with following copyrights:
 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
 *                    PeiSen Hou <pshou@realtek.com.tw>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <sound/pcm.h>
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
#include "skl.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
/*
 * initialize the PCI registers
 */
static void skl_update_pci_byte(struct pci_dev *pci, unsigned int reg,
                                unsigned char mask, unsigned char val)
{
        unsigned char data;

        pci_read_config_byte(pci, reg, &data);
        data &= ~mask;
        data |= (val & mask);
        pci_write_config_byte(pci, reg, data);
}

static void skl_init_pci(struct skl *skl)
{
        struct hdac_bus *bus = skl_to_bus(skl);

        /*
         * Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
         * TCSEL == Traffic Class Select Register, which sets PCI express QOS
         * Ensuring these bits are 0 clears playback static on some HD Audio
         * codecs.
         * The PCI register TCSEL is defined in the Intel manuals.
         */
        dev_dbg(bus->dev, "Clearing TCSEL\n");
        skl_update_pci_byte(skl->pci, AZX_PCIREG_TCSEL, 0x07, 0);
}
static void update_pci_dword(struct pci_dev *pci,
                        unsigned int reg, u32 mask, u32 val)
{
        u32 data = 0;

        pci_read_config_dword(pci, reg, &data);
        data &= ~mask;
        data |= (val & mask);
        pci_write_config_dword(pci, reg, data);
}
/*
 * skl_enable_miscbdcge - enable/disable CGCTL.MISCBDCGE bits
 *
 * @dev: device pointer
 * @enable: enable/disable flag
 */
static void skl_enable_miscbdcge(struct device *dev, bool enable)
{
        struct pci_dev *pci = to_pci_dev(dev);
        u32 val;

        val = enable ? AZX_CGCTL_MISCBDCGE_MASK : 0;

        update_pci_dword(pci, AZX_PCIREG_CGCTL, AZX_CGCTL_MISCBDCGE_MASK, val);
}
/**
 * skl_clock_power_gating: Enable/Disable clock and power gating
 *
 * @dev: Device pointer
 * @enable: Enable/Disable flag
 */
static void skl_clock_power_gating(struct device *dev, bool enable)
{
        struct pci_dev *pci = to_pci_dev(dev);
        struct hdac_bus *bus = pci_get_drvdata(pci);
        u32 val;

        /* Update PDCGE bit of CGCTL register */
        val = enable ? AZX_CGCTL_ADSPDCGE : 0;
        update_pci_dword(pci, AZX_PCIREG_CGCTL, AZX_CGCTL_ADSPDCGE, val);

        /* Update L1SEN bit of EM2 register */
        val = enable ? AZX_REG_VS_EM2_L1SEN : 0;
        snd_hdac_chip_updatel(bus, VS_EM2, AZX_REG_VS_EM2_L1SEN, val);

        /* Update ADSPPGD bit of PGCTL register */
        val = enable ? 0 : AZX_PGCTL_ADSPPGD;
        update_pci_dword(pci, AZX_PCIREG_PGCTL, AZX_PGCTL_ADSPPGD, val);
}
/*
 * While performing reset, controller may not come back properly causing
 * issues, so recommendation is to set CGCTL.MISCBDCGE to 0 then do reset
 * (init chip) and then again set CGCTL.MISCBDCGE to 1
 */
static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
{
        struct hdac_ext_link *hlink;
        int ret;

        skl_enable_miscbdcge(bus->dev, false);
        ret = snd_hdac_bus_init_chip(bus, full_reset);

        /* Reset stream-to-link mapping */
        list_for_each_entry(hlink, &bus->hlink_list, list)
                bus->io_ops->reg_writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

        skl_enable_miscbdcge(bus->dev, true);

        return ret;
}
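/*
 * Toggle the vendor-specific D0I3C.I3 bit. The CIP (command in progress)
 * bit must be clear both before and after the write, so it is polled with
 * a bounded udelay() loop on either side and an error is logged on timeout.
 */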
void skl_update_d0i3c(struct device *dev, bool enable)
{
        struct pci_dev *pci = to_pci_dev(dev);
        struct hdac_bus *bus = pci_get_drvdata(pci);
        u8 reg;
        int timeout = 50;

        reg = snd_hdac_chip_readb(bus, VS_D0I3C);
        /* Do not write to D0I3C until command in progress bit is cleared */
        while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) {
                udelay(10);
                reg = snd_hdac_chip_readb(bus, VS_D0I3C);
        }

        /* Highly unlikely. But if it happens, flag error explicitly */
        if (!timeout) {
                dev_err(bus->dev, "Before D0I3C update: D0I3C CIP timeout\n");
                return;
        }

        if (enable)
                reg = reg | AZX_REG_VS_D0I3C_I3;
        else
                reg = reg & (~AZX_REG_VS_D0I3C_I3);

        snd_hdac_chip_writeb(bus, VS_D0I3C, reg);

        timeout = 50;
        /* Wait for cmd in progress to be cleared before exiting the function */
        reg = snd_hdac_chip_readb(bus, VS_D0I3C);
        while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) {
                udelay(10);
                reg = snd_hdac_chip_readb(bus, VS_D0I3C);
        }

        /* Highly unlikely. But if it happens, flag error explicitly */
        if (!timeout) {
                dev_err(bus->dev, "After D0I3C update: D0I3C CIP timeout\n");
                return;
        }

        dev_dbg(bus->dev, "D0I3C register = 0x%x\n",
                        snd_hdac_chip_readb(bus, VS_D0I3C));
}
/* called from IRQ */
static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
{
        snd_pcm_period_elapsed(hstr->substream);
}
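/*
 * Primary interrupt handler: bail out early when the device is not runtime
 * active, ack RIRB responses under the register lock, and wake the threaded
 * handler if stream interrupts are still pending.
 */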
static irqreturn_t skl_interrupt(int irq, void *dev_id)
{
        struct hdac_bus *bus = dev_id;
        u32 status;

        if (!pm_runtime_active(bus->dev))
                return IRQ_NONE;

        spin_lock(&bus->reg_lock);

        status = snd_hdac_chip_readl(bus, INTSTS);
        if (status == 0 || status == 0xffffffff) {
                spin_unlock(&bus->reg_lock);
                return IRQ_NONE;
        }

        /* clear rirb int */
        status = snd_hdac_chip_readb(bus, RIRBSTS);
        if (status & RIRB_INT_MASK) {
                if (status & RIRB_INT_RESPONSE)
                        snd_hdac_bus_update_rirb(bus);
                snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
        }

        spin_unlock(&bus->reg_lock);

        return snd_hdac_chip_readl(bus, INTSTS) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
static irqreturn_t skl_threaded_handler(int irq, void *dev_id)
{
        struct hdac_bus *bus = dev_id;
        u32 status;

        status = snd_hdac_chip_readl(bus, INTSTS);

        snd_hdac_bus_handle_stream_irq(bus, status, skl_stream_update);

        return IRQ_HANDLED;
}
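/*
 * Request a shared, threaded IRQ for the controller and enable legacy INTx
 * signalling on the PCI device.
 */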
static int skl_acquire_irq(struct hdac_bus *bus, int do_disconnect)
{
        struct skl *skl = bus_to_skl(bus);
        int ret;

        ret = request_threaded_irq(skl->pci->irq, skl_interrupt,
                        skl_threaded_handler,
                        IRQF_SHARED,
                        KBUILD_MODNAME, bus);
        if (ret) {
                dev_err(bus->dev,
                        "unable to grab IRQ %d, disabling device\n",
                        skl->pci->irq);
                return ret;
        }

        bus->irq = skl->pci->irq;
        pci_intx(skl->pci, 1);

        return 0;
}
static int skl_suspend_late(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);
        struct hdac_bus *bus = pci_get_drvdata(pci);
        struct skl *skl = bus_to_skl(bus);

        return skl_suspend_late_dsp(skl);
}

#ifdef CONFIG_PM
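/*
 * Common low-power path shared by system and runtime suspend: power down
 * all HDA links, suspend the DSP, stop the controller, set the PGCTL LSRMD
 * bits, enter link reset with MISCBDCGE temporarily disabled and release
 * any remaining pipeline resources.
 */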
static int _skl_suspend(struct hdac_bus *bus)
{
        struct skl *skl = bus_to_skl(bus);
        struct pci_dev *pci = to_pci_dev(bus->dev);
        int ret;

        snd_hdac_ext_bus_link_power_down_all(bus);

        ret = skl_suspend_dsp(skl);
        if (ret < 0)
                return ret;

        snd_hdac_bus_stop_chip(bus);
        update_pci_dword(pci, AZX_PCIREG_PGCTL,
                AZX_PGCTL_LSRMD_MASK, AZX_PGCTL_LSRMD_MASK);
        skl_enable_miscbdcge(bus->dev, false);
        snd_hdac_bus_enter_link_reset(bus);
        skl_enable_miscbdcge(bus->dev, true);
        skl_cleanup_resources(skl);

        return 0;
}

static int _skl_resume(struct hdac_bus *bus)
{
        struct skl *skl = bus_to_skl(bus);

        skl_init_pci(skl);
        skl_init_chip(bus, true);

        return skl_resume_dsp(skl);
}
#endif
#ifdef CONFIG_PM_SLEEP
/*
 * power management
 */
static int skl_suspend(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);
        struct hdac_bus *bus = pci_get_drvdata(pci);
        struct skl *skl = bus_to_skl(bus);
        int ret = 0;

        /*
         * Do not suspend if streams which are marked ignore suspend are
         * running, we need to save the state for these and continue
         */
        if (skl->supend_active) {
                /* turn off the links and stop the CORB/RIRB DMA if it is On */
                snd_hdac_ext_bus_link_power_down_all(bus);
                if (bus->cmd_dma_state)
                        snd_hdac_bus_stop_cmd_io(bus);
                enable_irq_wake(bus->irq);
                pci_save_state(pci);
        } else {
                ret = _skl_suspend(bus);
                if (ret < 0)
                        return ret;
                skl->skl_sst->fw_loaded = false;
        }

        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
                ret = snd_hdac_display_power(bus, false);
                if (ret < 0)
                        dev_err(bus->dev,
                                "Cannot turn OFF display power on i915\n");
        }

        return ret;
}
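/*
 * System resume: re-enable i915 display power first, then either restore
 * the lightly suspended state (links, CORB/RIRB DMA, IRQ wake) when a
 * stream marked "ignore suspend" kept the device active, or do a full
 * _skl_resume() and power the unused links back down.
 */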
static int skl_resume(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);
        struct hdac_bus *bus = pci_get_drvdata(pci);
        struct skl *skl = bus_to_skl(bus);
        struct hdac_ext_link *hlink = NULL;
        int ret;

        /* Turned OFF in HDMI codec driver after codec reconfiguration */
        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
                ret = snd_hdac_display_power(bus, true);
                if (ret < 0) {
                        dev_err(bus->dev,
                                "Cannot turn on display power on i915\n");
                        return ret;
                }
        }

        /*
         * resume only when we are not in suspend active, otherwise need to
         * restore the device
         */
        if (skl->supend_active) {
                pci_restore_state(pci);
                snd_hdac_ext_bus_link_power_up_all(bus);
                disable_irq_wake(bus->irq);
                /*
                 * turn On the links which are On before active suspend
                 * and start the CORB/RIRB DMA if On before
                 * active suspend.
                 */
                list_for_each_entry(hlink, &bus->hlink_list, list) {
                        if (hlink->ref_count)
                                snd_hdac_ext_bus_link_power_up(hlink);
                }

                ret = 0;
                if (bus->cmd_dma_state)
                        snd_hdac_bus_init_cmd_io(bus);
        } else {
                ret = _skl_resume(bus);

                /* turn off the links which are off before suspend */
                list_for_each_entry(hlink, &bus->hlink_list, list) {
                        if (!hlink->ref_count)
                                snd_hdac_ext_bus_link_power_down(hlink);
                }

                if (!bus->cmd_dma_state)
                        snd_hdac_bus_stop_cmd_io(bus);
        }

        return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
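/* Runtime PM simply reuses the common suspend/resume helpers above. */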
static int skl_runtime_suspend(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);
        struct hdac_bus *bus = pci_get_drvdata(pci);

        dev_dbg(bus->dev, "in %s\n", __func__);

        return _skl_suspend(bus);
}

static int skl_runtime_resume(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);
        struct hdac_bus *bus = pci_get_drvdata(pci);

        dev_dbg(bus->dev, "in %s\n", __func__);

        return _skl_resume(bus);
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops skl_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
        SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
        .suspend_late = skl_suspend_late,
};
/*
 * destructor
 */
static int skl_free(struct hdac_bus *bus)
{
        struct skl *skl = bus_to_skl(bus);

        skl->init_done = 0; /* to be sure */
        snd_hdac_ext_stop_streams(bus);

        if (bus->irq >= 0)
                free_irq(bus->irq, (void *)bus);
        snd_hdac_bus_free_stream_pages(bus);
        snd_hdac_stream_free_all(bus);
        snd_hdac_link_free_all(bus);

        if (bus->remap_addr)
                iounmap(bus->remap_addr);

        pci_release_regions(skl->pci);
        pci_disable_device(skl->pci);

        snd_hdac_ext_bus_exit(bus);

        cancel_work_sync(&skl->probe_work);
        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
                snd_hdac_i915_exit(bus);

        return 0;
}
/*
 * For each ssp there are 3 clocks (mclk/sclk/sclkfs).
 * e.g. for ssp0, clocks will be named as
 *      "ssp0_mclk", "ssp0_sclk", "ssp0_sclkfs"
 * Since skl+ has 6 ssps, a total of 18 clocks will be created.
 */
static struct skl_ssp_clk skl_ssp_clks[] = {
        {.name = "ssp0_mclk"}, {.name = "ssp1_mclk"}, {.name = "ssp2_mclk"},
        {.name = "ssp3_mclk"}, {.name = "ssp4_mclk"}, {.name = "ssp5_mclk"},
        {.name = "ssp0_sclk"}, {.name = "ssp1_sclk"}, {.name = "ssp2_sclk"},
        {.name = "ssp3_sclk"}, {.name = "ssp4_sclk"}, {.name = "ssp5_sclk"},
        {.name = "ssp0_sclkfs"}, {.name = "ssp1_sclkfs"},
        {.name = "ssp2_sclkfs"},
        {.name = "ssp3_sclkfs"}, {.name = "ssp4_sclkfs"},
        {.name = "ssp5_sclkfs"},
};
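/*
 * Match the platform against the ACPI machine table handed in through the
 * PCI driver_data, then record the machine descriptor, the DSP firmware
 * name and (when platform data is present) the DMIC geometry.
 */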
static int skl_find_machine(struct skl *skl, void *driver_data)
{
        struct hdac_bus *bus = skl_to_bus(skl);
        struct snd_soc_acpi_mach *mach = driver_data;
        struct skl_machine_pdata *pdata;

        mach = snd_soc_acpi_find_machine(mach);
        if (mach == NULL) {
                dev_err(bus->dev, "No matching machine driver found\n");
                return -ENODEV;
        }

        skl->mach = mach;
        skl->fw_name = mach->fw_filename;
        pdata = mach->pdata;

        if (pdata) {
                skl->use_tplg_pcm = pdata->use_tplg_pcm;
                pdata->dmic_num = skl_get_dmic_geo(skl);
        }

        return 0;
}
static int skl_machine_device_register(struct skl *skl)
{
        struct hdac_bus *bus = skl_to_bus(skl);
        struct snd_soc_acpi_mach *mach = skl->mach;
        struct platform_device *pdev;
        int ret;

        pdev = platform_device_alloc(mach->drv_name, -1);
        if (pdev == NULL) {
                dev_err(bus->dev, "platform device alloc failed\n");
                return -EIO;
        }

        ret = platform_device_add(pdev);
        if (ret) {
                dev_err(bus->dev, "failed to add machine device\n");
                platform_device_put(pdev);
                return -EIO;
        }

        if (mach->pdata)
                dev_set_drvdata(&pdev->dev, mach->pdata);

        skl->i2s_dev = pdev;

        return 0;
}

static void skl_machine_device_unregister(struct skl *skl)
{
        if (skl->i2s_dev)
                platform_device_unregister(skl->i2s_dev);
}

static int skl_dmic_device_register(struct skl *skl)
{
        struct hdac_bus *bus = skl_to_bus(skl);
        struct platform_device *pdev;
        int ret;

        /* SKL has one dmic port, so allocate dmic device for this */
        pdev = platform_device_alloc("dmic-codec", -1);
        if (!pdev) {
                dev_err(bus->dev, "failed to allocate dmic device\n");
                return -ENOMEM;
        }

        ret = platform_device_add(pdev);
        if (ret) {
                dev_err(bus->dev, "failed to add dmic device: %d\n", ret);
                platform_device_put(pdev);
                return ret;
        }
        skl->dmic_dev = pdev;

        return 0;
}

static void skl_dmic_device_unregister(struct skl *skl)
{
        if (skl->dmic_dev)
                platform_device_unregister(skl->dmic_dev);
}
static struct skl_clk_parent_src skl_clk_src[] = {
        { .clk_id = SKL_XTAL, .name = "xtal" },
        { .clk_id = SKL_CARDINAL, .name = "cardinal", .rate = 24576000 },
        { .clk_id = SKL_PLL, .name = "pll", .rate = 96000000 },
};

struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(skl_clk_src); i++) {
                if (skl_clk_src[i].clk_id == clk_id)
                        return &skl_clk_src[i];
        }

        return NULL;
}
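/*
 * The XTAL parent rate depends on the platform: it is set to 24 MHz for
 * PCI IDs 0x9d70/0x9d71 and to 19.2 MHz for everything else handled here.
 */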
static void init_skl_xtal_rate(int pci_id)
{
        switch (pci_id) {
        case 0x9d70:
        case 0x9d71:
                skl_clk_src[0].rate = 24000000;
                return;

        default:
                skl_clk_src[0].rate = 19200000;
                return;
        }
}
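/*
 * Allocate the SSP clock platform data, fill in the parent and per-SSP
 * clock tables (rates come from NHLT via skl_get_clks()) and register the
 * "skl-ssp-clk" platform device with that data.
 */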
static int skl_clock_device_register(struct skl *skl)
{
        struct platform_device_info pdevinfo = {NULL};
        struct skl_clk_pdata *clk_pdata;

        clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata),
                                                        GFP_KERNEL);
        if (!clk_pdata)
                return -ENOMEM;

        init_skl_xtal_rate(skl->pci->device);

        clk_pdata->parent_clks = skl_clk_src;
        clk_pdata->ssp_clks = skl_ssp_clks;
        clk_pdata->num_clks = ARRAY_SIZE(skl_ssp_clks);

        /* Query NHLT to fill the rates and parent */
        skl_get_clks(skl, clk_pdata->ssp_clks);
        clk_pdata->pvt_data = skl;

        /* Register Platform device */
        pdevinfo.parent = &skl->pci->dev;
        pdevinfo.id = -1;
        pdevinfo.name = "skl-ssp-clk";
        pdevinfo.data = clk_pdata;
        pdevinfo.size_data = sizeof(*clk_pdata);
        skl->clk_dev = platform_device_register_full(&pdevinfo);
        return PTR_ERR_OR_ZERO(skl->clk_dev);
}

static void skl_clock_device_unregister(struct skl *skl)
{
        if (skl->clk_dev)
                platform_device_unregister(skl->clk_dev);
}
/*
 * Probe the given codec address
 */
static int probe_codec(struct hdac_bus *bus, int addr)
{
        unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
                (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
        unsigned int res = -1;
        struct skl *skl = bus_to_skl(bus);
        struct hdac_device *hdev;

        mutex_lock(&bus->cmd_mutex);
        snd_hdac_bus_send_cmd(bus, cmd);
        snd_hdac_bus_get_response(bus, addr, &res);
        mutex_unlock(&bus->cmd_mutex);
        if (res == -1)
                return -EIO;
        dev_dbg(bus->dev, "codec #%d probed OK\n", addr);

        hdev = devm_kzalloc(&skl->pci->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return -ENOMEM;

        return snd_hdac_ext_bus_device_init(bus, addr, hdev);
}
/* Codec initialization */
static void skl_codec_create(struct hdac_bus *bus)
{
        int c, max_slots;

        max_slots = HDA_MAX_CODECS;

        /* First try to probe all given codec slots */
        for (c = 0; c < max_slots; c++) {
                if ((bus->codec_mask & (1 << c))) {
                        if (probe_codec(bus, c) < 0) {
                                /*
                                 * Some BIOSen give you wrong codec addresses
                                 * that don't exist
                                 */
                                dev_warn(bus->dev,
                                         "Codec #%d probe error; disabling it...\n", c);
                                bus->codec_mask &= ~(1 << c);
                                /*
                                 * Worse, accessing a non-existing codec often
                                 * screws up the controller bus and disturbs
                                 * further communication. Thus if an error
                                 * occurs during probing, it is better to
                                 * reset the controller bus to get back to a
                                 * sane state.
                                 */
                                snd_hdac_bus_stop_chip(bus);
                                skl_init_chip(bus, true);
                        }
                }
        }
}
static const struct hdac_bus_ops bus_core_ops = {
        .command = snd_hdac_bus_send_cmd,
        .get_response = snd_hdac_bus_get_response,
};
static int skl_i915_init(struct hdac_bus *bus)
{
        int err;

        /*
         * The HDMI codec is in GPU so we need to ensure that it is powered
         * up and ready for probe
         */
        err = snd_hdac_i915_init(bus);
        if (err < 0)
                return err;

        err = snd_hdac_display_power(bus, true);
        if (err < 0)
                dev_err(bus->dev, "Cannot turn on display power on i915\n");

        return err;
}
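/*
 * Deferred part of the probe, run from a workqueue: power up the display
 * audio path, initialize the controller, probe codecs, register the
 * platform component and the machine device, drop the link references
 * taken at init and finally hand the device over to runtime PM.
 */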
static void skl_probe_work(struct work_struct *work)
{
        struct skl *skl = container_of(work, struct skl, probe_work);
        struct hdac_bus *bus = skl_to_bus(skl);
        struct hdac_ext_link *hlink = NULL;
        int err;

        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
                err = skl_i915_init(bus);
                if (err < 0)
                        return;
        }

        err = skl_init_chip(bus, true);
        if (err < 0) {
                dev_err(bus->dev, "Init chip failed with err: %d\n", err);
                goto out_err;
        }

        /* codec detection */
        if (!bus->codec_mask)
                dev_info(bus->dev, "no hda codecs found!\n");

        /* create codec instances */
        skl_codec_create(bus);

        /* register platform dai and controls */
        err = skl_platform_register(bus->dev);
        if (err < 0) {
                dev_err(bus->dev, "platform register failed: %d\n", err);
                return;
        }

        if (bus->ppcap) {
                err = skl_machine_device_register(skl);
                if (err < 0) {
                        dev_err(bus->dev, "machine register failed: %d\n", err);
                        goto out_err;
                }
        }

        /*
         * we are done probing so decrement link counts
         */
        list_for_each_entry(hlink, &bus->hlink_list, list)
                snd_hdac_ext_bus_link_put(bus, hlink);

        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
                err = snd_hdac_display_power(bus, false);
                if (err < 0) {
                        dev_err(bus->dev, "Cannot turn off display power on i915\n");
                        skl_machine_device_unregister(skl);
                        return;
                }
        }

        /* configure PM */
        pm_runtime_put_noidle(bus->dev);
        pm_runtime_allow(bus->dev);
        skl->init_done = 1;

        return;

out_err:
        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
                err = snd_hdac_display_power(bus, false);
}
/*
 * constructor
 */
static int skl_create(struct pci_dev *pci,
                      const struct hdac_io_ops *io_ops,
                      struct skl **rskl)
{
        struct skl *skl;
        struct hdac_bus *bus;
        int err;

        *rskl = NULL;

        err = pci_enable_device(pci);
        if (err < 0)
                return err;

        skl = devm_kzalloc(&pci->dev, sizeof(*skl), GFP_KERNEL);
        if (!skl) {
                pci_disable_device(pci);
                return -ENOMEM;
        }

        bus = skl_to_bus(skl);
        snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, NULL);
        bus->use_posbuf = 1;
        skl->pci = pci;
        INIT_WORK(&skl->probe_work, skl_probe_work);
        bus->bdl_pos_adj = 0;

        *rskl = skl;

        return 0;
}
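/*
 * First stage of controller init: map the MMIO BAR, reset the link, parse
 * capabilities, acquire the IRQ, set the DMA masks, size the capture and
 * playback stream arrays from GCAP and allocate stream pages before
 * bringing the chip up.
 */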
static int skl_first_init(struct hdac_bus *bus)
{
        struct skl *skl = bus_to_skl(bus);
        struct pci_dev *pci = skl->pci;
        int err;
        unsigned short gcap;
        int cp_streams, pb_streams, start_idx;

        err = pci_request_regions(pci, "Skylake HD audio");
        if (err < 0)
                return err;

        bus->addr = pci_resource_start(pci, 0);
        bus->remap_addr = pci_ioremap_bar(pci, 0);
        if (bus->remap_addr == NULL) {
                dev_err(bus->dev, "ioremap error\n");
                return -ENXIO;
        }

        snd_hdac_bus_reset_link(bus, true);

        snd_hdac_bus_parse_capabilities(bus);

        if (skl_acquire_irq(bus, 0) < 0)
                return -EBUSY;

        pci_set_master(pci);
        synchronize_irq(bus->irq);

        gcap = snd_hdac_chip_readw(bus, GCAP);
        dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap);

        /* allow 64bit DMA address if supported by H/W */
        if (!dma_set_mask(bus->dev, DMA_BIT_MASK(64))) {
                dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(64));
        } else {
                dma_set_mask(bus->dev, DMA_BIT_MASK(32));
                dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(32));
        }

        /* read number of streams from GCAP register */
        cp_streams = (gcap >> 8) & 0x0f;
        pb_streams = (gcap >> 12) & 0x0f;

        if (!pb_streams && !cp_streams)
                return -EIO;

        bus->num_streams = cp_streams + pb_streams;

        /* initialize streams */
        snd_hdac_ext_stream_init_all
                (bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
        start_idx = cp_streams;
        snd_hdac_ext_stream_init_all
                (bus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);

        err = snd_hdac_bus_alloc_stream_pages(bus);
        if (err < 0)
                return err;

        /* initialize chip */
        skl_init_pci(skl);

        return skl_init_chip(bus, true);
}
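/*
 * PCI probe entry point: create the skl/bus objects, do the synchronous
 * hardware init, load NHLT, and, when PPCAP reports a DSP, register the
 * clock device, find the machine driver and init the DSP. A DMIC device
 * is then created and the rest of the bring-up is deferred to
 * skl_probe_work().
 */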
static int skl_probe(struct pci_dev *pci,
                     const struct pci_device_id *pci_id)
{
        struct skl *skl;
        struct hdac_bus *bus = NULL;
        int err;

        /* we use ext core ops, so provide NULL for ops here */
        err = skl_create(pci, NULL, &skl);
        if (err < 0)
                return err;

        bus = skl_to_bus(skl);

        err = skl_first_init(bus);
        if (err < 0)
                goto out_free;

        skl->pci_id = pci->device;

        device_disable_async_suspend(bus->dev);

        skl->nhlt = skl_nhlt_init(bus->dev);

        if (skl->nhlt == NULL) {
                err = -ENODEV;
                goto out_free;
        }

        err = skl_nhlt_create_sysfs(skl);
        if (err < 0)
                goto out_nhlt_free;

        skl_nhlt_update_topology_bin(skl);

        pci_set_drvdata(skl->pci, bus);

        /* check if dsp is there */
        if (bus->ppcap) {
                /* create device for dsp clk */
                err = skl_clock_device_register(skl);
                if (err < 0)
                        goto out_clk_free;

                err = skl_find_machine(skl, (void *)pci_id->driver_data);
                if (err < 0)
                        goto out_nhlt_free;

                err = skl_init_dsp(skl);
                if (err < 0) {
                        dev_dbg(bus->dev, "error failed to register dsp\n");
                        goto out_nhlt_free;
                }
                skl->skl_sst->enable_miscbdcge = skl_enable_miscbdcge;
                skl->skl_sst->clock_power_gating = skl_clock_power_gating;
        }
        if (bus->mlcap)
                snd_hdac_ext_bus_get_ml_capabilities(bus);

        snd_hdac_bus_stop_chip(bus);

        /* create device for soc dmic */
        err = skl_dmic_device_register(skl);
        if (err < 0)
                goto out_dsp_free;

        schedule_work(&skl->probe_work);

        return 0;

out_dsp_free:
        skl_free_dsp(skl);
out_clk_free:
        skl_clock_device_unregister(skl);
out_nhlt_free:
        skl_nhlt_free(skl->nhlt);
out_free:
        skl_free(bus);

        return err;
}
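/*
 * Shutdown handler: once probe has completed, stop all streams, decouple
 * them from the host/link DMA engines and stop the controller.
 */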
static void skl_shutdown(struct pci_dev *pci)
{
        struct hdac_bus *bus = pci_get_drvdata(pci);
        struct hdac_stream *s;
        struct hdac_ext_stream *stream;
        struct skl *skl;

        if (!bus)
                return;

        skl = bus_to_skl(bus);

        if (!skl->init_done)
                return;

        snd_hdac_ext_stop_streams(bus);
        list_for_each_entry(s, &bus->stream_list, list) {
                stream = stream_to_hdac_ext_stream(s);
                snd_hdac_ext_stream_decouple(bus, stream, false);
        }

        snd_hdac_bus_stop_chip(bus);
}
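/*
 * PCI remove: release the topology firmware, unregister the codec,
 * platform, machine, dmic and clock devices, free the DSP and NHLT
 * resources and finally tear down the bus.
 */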
static void skl_remove(struct pci_dev *pci)
{
        struct hdac_bus *bus = pci_get_drvdata(pci);
        struct skl *skl = bus_to_skl(bus);

        release_firmware(skl->tplg);

        pm_runtime_get_noresume(&pci->dev);

        /* codec removal, invoke bus_device_remove */
        snd_hdac_ext_bus_device_remove(bus);

        skl->debugfs = NULL;
        skl_platform_unregister(&pci->dev);
        skl_free_dsp(skl);
        skl_machine_device_unregister(skl);
        skl_dmic_device_unregister(skl);
        skl_clock_device_unregister(skl);
        skl_nhlt_remove_sysfs(skl);
        skl_nhlt_free(skl->nhlt);
        skl_free(bus);
        dev_set_drvdata(&pci->dev, NULL);
}
/* PCI IDs */
static const struct pci_device_id skl_ids[] = {
        /* Sunrise Point-LP */
        { PCI_DEVICE(0x8086, 0x9d70),
                .driver_data = (unsigned long)&snd_soc_acpi_intel_skl_machines},
        /* BXT-P */
        { PCI_DEVICE(0x8086, 0x5a98),
                .driver_data = (unsigned long)&snd_soc_acpi_intel_bxt_machines},
        /* KBL */
        { PCI_DEVICE(0x8086, 0x9D71),
                .driver_data = (unsigned long)&snd_soc_acpi_intel_kbl_machines},
        /* GLK */
        { PCI_DEVICE(0x8086, 0x3198),
                .driver_data = (unsigned long)&snd_soc_acpi_intel_glk_machines},
        /* CNL */
        { PCI_DEVICE(0x8086, 0x9dc8),
                .driver_data = (unsigned long)&snd_soc_acpi_intel_cnl_machines},
        { 0, }
};
MODULE_DEVICE_TABLE(pci, skl_ids);

/* pci_driver definition */
static struct pci_driver skl_driver = {
        .name = KBUILD_MODNAME,
        .id_table = skl_ids,
        .probe = skl_probe,
        .remove = skl_remove,
        .shutdown = skl_shutdown,
        .driver = {
                .pm = &skl_pm,
        },
};
module_pci_driver(skl_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake ASoC HDA driver");