/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/suspend.h>
#include <linux/delay.h>

#include "internal.h"

ACPI_MODULE_NAME("acpi_lpss");
#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

#define LPSS_PRV_REG_COUNT		9

/* LPSS Flags */
#define LPSS_CLK			BIT(0)
#define LPSS_CLK_GATE			BIT(1)
#define LPSS_CLK_DIVIDER		BIT(2)
#define LPSS_LTR			BIT(3)
#define LPSS_SAVE_CTX			BIT(4)
#define LPSS_NO_D3_DELAY		BIT(5)

/* Crystal Cove PMIC shares same ACPI ID between different platforms */
#define BYT_CRC_HRV			2
#define CHT_CRC_HRV			3
struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
	bool resume_from_noirq;
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

struct lpss_private_data {
	struct acpi_device *adev;
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};
/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
static u32 pmc_atom_d3_mask = 0xfe000ffe;

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
 *
 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * Without power, any access to the DMA controller will hang the system.
 * The behaviour is reproducible on some HP laptops based on Intel BayTrail
 * as well as on the ASUS T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever at least one other LPSS device is in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR			0xF4
#define LPSS_UART_CPR_AFCE		BIT(4)
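
/*
 * Mask the private TX interrupt and, if the UART was synthesized without
 * auto flow control (the CPR AFCE bit is clear), enable the RTS override
 * bit in the private GENERAL register.
 */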
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}
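
/*
 * Take the device out of reset by setting both the function and APB reset
 * bits in the private RESETS register.
 */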
static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV))
		pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}

#define LPSS_I2C_ENABLE			0x6c
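
/*
 * Detect an I2C bus shared with the PUNIT (via the _SEM object) and exclude
 * it from the PMC D3 mask, deassert the controller resets, record a fixed
 * 133 MHz clock rate if the private clock register is already programmed,
 * and write 0 to the ENABLE register so the controller starts disabled.
 */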
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	const char *uid_str = acpi_device_uid(pdata->adev);
	acpi_handle handle = pdata->adev->handle;
	unsigned long long shared_host = 0;
	acpi_status status;
	long uid = 0;

	/* Expected to always be true, but better safe than sorry */
	if (uid_str)
		uid = simple_strtol(uid_str, NULL, 10);

	/* Detect I2C bus shared with PUNIT and ignore its d3 status */
	status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
	if (ACPI_SUCCESS(status) && shared_host && uid)
		pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));

	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}

static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = bsw_pwm_setup,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};

#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id lpss_cpu_ids[] = {
	ICPU(INTEL_FAM6_ATOM_SILVERMONT),	/* Valleyview, Bay Trail */
	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },
	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT33C7", },
	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
	{ "INT33B2", },
	{ "INT33FC", },
	/* Braswell LPSS devices */
	{ "80862286", LPSS_ADDR(lpss_dma_desc) },
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT3437", },
	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },
	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS

static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;

	return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}
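
/*
 * Register the per-device clock tree described by the private clock register:
 * either a fixed-rate clock (when a rate was latched by the setup hook), or a
 * gate at bit 0 followed by a 15-bit M/N fractional divider at bits 15:1 and
 * 30:16 and an "update" gate at bit 31, then bind the result to the device
 * through a clkdev lookup.
 */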
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk;
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}

struct lpss_device_links {
	const char *supplier_hid;
	const char *supplier_uid;
	const char *consumer_hid;
	const char *consumer_uid;
	u32 flags;
};

/*
 * The _DEP method is used to identify dependencies but instead of creating
 * device links for every handle in _DEP, only links in the following list are
 * created. That is necessary because, in the general case, _DEP can refer to
 * devices that might not have drivers, or that are on different buses, or
 * where the supplier is not enumerated until after the consumer is probed.
 */
static const struct lpss_device_links lpss_device_links[] = {
	{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
};

static bool hid_uid_match(const char *hid1, const char *uid1,
			  const char *hid2, const char *uid2)
{
	return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
}

static bool acpi_lpss_is_supplier(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
			     link->supplier_hid, link->supplier_uid);
}

static bool acpi_lpss_is_consumer(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
			     link->consumer_hid, link->consumer_uid);
}

struct hid_uid {
	const char *hid;
	const char *uid;
};

static int match_hid_uid(struct device *dev, void *data)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct hid_uid *id = data;

	if (!adev)
		return 0;

	return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
			     id->hid, id->uid);
}

static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
	struct device *dev;

	struct hid_uid data = {
		.hid = hid,
		.uid = uid,
	};

	dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
	if (dev)
		return dev;

	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}
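
/* Return true if @handle appears in the _DEP package of @adev. */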
static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
{
	struct acpi_handle_list dep_devices;
	acpi_status status;
	int i;

	if (!acpi_has_method(adev->handle, "_DEP"))
		return false;

	status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
					 &dep_devices);
	if (ACPI_FAILURE(status)) {
		dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
		return false;
	}

	for (i = 0; i < dep_devices.count; i++) {
		if (dep_devices.handles[i] == handle)
			return true;
	}

	return false;
}
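
/*
 * If the known consumer matching @link is present and its _DEP lists @dev1,
 * create a device link making @dev1 its supplier. acpi_lpss_link_supplier()
 * below handles the mirror case where @dev1 is the consumer.
 */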
static void acpi_lpss_link_consumer(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
	if (!dev2)
		return;

	if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
		device_link_add(dev2, dev1, link->flags);

	put_device(dev2);
}

static void acpi_lpss_link_supplier(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
	if (!dev2)
		return;

	if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
		device_link_add(dev1, dev2, link->flags);

	put_device(dev2);
}

static void acpi_lpss_create_device_links(struct acpi_device *adev,
					  struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
		const struct lpss_device_links *link = &lpss_device_links[i];

		if (acpi_lpss_is_supplier(adev, link))
			acpi_lpss_link_consumer(&pdev->dev, link);

		if (acpi_lpss_is_consumer(adev, link))
			acpi_lpss_link_supplier(&pdev->dev, link);
	}
}
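
/*
 * Scan handler ->attach() callback: look up the device descriptor, map the
 * MMIO resource, run the per-device setup hook, register the LPSS clock if
 * needed, fix up the initial power state and finally create the platform
 * device together with any device links it participates in.
 */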
static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		pdev = acpi_create_platform_device(adev, NULL);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
		adev->pnp.type.platform_id = 0;
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	pdata->adev = adev;
	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	acpi_device_fix_up_power(adev);

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (!IS_ERR_OR_NULL(pdev)) {
		acpi_lpss_create_device_links(adev, pdev);
		return 1;
	}

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

 err_out:
	kfree(pdata);
	return ret;
}

static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}
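
/*
 * Read a private register on behalf of the LTR sysfs attributes; bail out
 * with -EAGAIN while the device is runtime suspended so the MMIO space is
 * not touched without power.
 */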
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

static const struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr",
};
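
/*
 * ->set_latency_tolerance() hook: a negative @val switches the device back to
 * automatic LTR mode; otherwise the value is encoded with the 1 us or 32 us
 * scale defined above, written to the SW_LTR register and software LTR mode
 * is enabled in the GENERAL register.
 */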
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	u32 ltr_mode, ltr_val;

	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
	if (val < 0) {
		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
		}
		return;
	}
	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}
	ltr_val |= val;
	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects a 10 ms delay before the device can be accessed after a D3
	 * to D0 transition. However, some platforms such as BSW do not need
	 * this delay.
	 */
	unsigned int delay = 10;	/* default 10 ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where the device is
	 * either in a known state defined by the BIOS or most likely powered
	 * off. Due to this we have to deassert the reset line to be sure that
	 * ->probe() will recognize the device.
	 */
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		lpss_deassert_reset(pdata);

	return 0;
}

static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_suspend(dev, false);
}

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);
static bool lpss_iosf_d3_entered = true;
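
/*
 * Put the LPSS power island into D3: once every other LPSS device is already
 * in D3hot (according to the PMC), program PMCSR of both LPSS DMA controllers
 * and the GPIODEF0 register of the island endpoint over IOSF-SB.
 */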
static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual status of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
	 * with the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all other
	 * devices are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	lpss_iosf_d3_entered = true;

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	if (!lpss_iosf_d3_entered)
		goto exit;

	lpss_iosf_d3_entered = false;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_suspend(dev, wakeup);

	/*
	 * This call must be last in the sequence, otherwise the PMC will
	 * return a wrong status for devices that are about to be powered off.
	 * See lpss_iosf_enter_d3_state() for further information.
	 */
	if (acpi_target_system_state() == ACPI_STATE_S0 &&
	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

static int acpi_lpss_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to be symmetric with the
	 * acpi_lpss_runtime_suspend() path.
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
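/*
 * When a descriptor sets ->resume_from_noirq, the late suspend and early
 * resume handlers below return immediately and the equivalent work is done
 * from the noirq phase callbacks instead (see the comments in
 * acpi_lpss_suspend_noirq() and acpi_lpss_resume_noirq()).
 */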
static int acpi_lpss_do_suspend_late(struct device *dev)
{
	int ret;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	ret = pm_generic_suspend_late(dev);
	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_suspend_late(dev);
}

static int acpi_lpss_suspend_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->resume_from_noirq) {
		/*
		 * The driver's ->suspend_late callback will be invoked by
		 * acpi_lpss_do_suspend_late(), with the assumption that the
		 * driver really wanted to run that code in ->suspend_noirq,
		 * but it could not run after acpi_dev_suspend() and the driver
		 * expected the latter to be called in the "late" phase.
		 */
		ret = acpi_lpss_do_suspend_late(dev);
		if (ret)
			return ret;
	}

	return acpi_subsys_suspend_noirq(dev);
}

static int acpi_lpss_do_resume_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_resume_early(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_resume_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/* Follow acpi_subsys_resume_noirq(). */
	if (dev_pm_may_skip_resume(dev))
		return 0;

	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	ret = pm_generic_resume_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/*
	 * The driver's ->resume_early callback will be invoked by
	 * acpi_lpss_do_resume_early(), with the assumption that the driver
	 * really wanted to run that code in ->resume_noirq, but it could not
	 * run before acpi_dev_resume() and the driver expected the latter to
	 * be called in the "early" phase.
	 */
	return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_do_restore_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_restore_early(dev);
}

static int acpi_lpss_restore_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_restore_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_restore_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_do_poweroff_late(struct device *dev)
{
	int ret = pm_generic_poweroff_late(dev);

	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_poweroff_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_poweroff_late(dev);
}

static int acpi_lpss_poweroff_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq) {
		/* This is analogous to the acpi_lpss_suspend_noirq() case. */
		int ret = acpi_lpss_do_poweroff_late(dev);

		if (ret)
			return ret;
	}

	return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */

static int acpi_lpss_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	return ret ? ret : acpi_lpss_suspend(dev, true);
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = acpi_subsys_complete,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.suspend_noirq = acpi_lpss_suspend_noirq,
		.resume_noirq = acpi_lpss_resume_noirq,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_poweroff,
		.poweroff_late = acpi_lpss_poweroff_late,
		.poweroff_noirq = acpi_lpss_poweroff_noirq,
		.restore_noirq = acpi_lpss_restore_noirq,
		.restore_early = acpi_lpss_restore_early,
#endif /* CONFIG_PM_SLEEP */
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif /* CONFIG_PM */
	},
};
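
/*
 * Platform bus notifier: for platform devices backed by an LPSS descriptor,
 * install or remove the LPSS PM domain and the lpss_ltr sysfs group as the
 * device is added, bound to or unbound from a driver, and removed.
 */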
static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}

static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};
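
/*
 * Scan handler ->bind() callback: expose the software LTR interface through
 * the standard latency-tolerance hook when the private MMIO window is large
 * enough to reach the LTR registers.
 */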
static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};

void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpt_clk_init();
	if (ret)
		return;

	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */