pci-acpi.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PCI support in ACPI
  4. *
  5. * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
  6. * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
  7. * Copyright (C) 2004 Intel Corp.
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/init.h>
  11. #include <linux/irqdomain.h>
  12. #include <linux/pci.h>
  13. #include <linux/msi.h>
  14. #include <linux/pci_hotplug.h>
  15. #include <linux/module.h>
  16. #include <linux/pci-acpi.h>
  17. #include <linux/pci-ecam.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/pm_qos.h>
  20. #include <linux/rwsem.h>
  21. #include "pci.h"
/*
 * The "PCI DSM" GUID used with all PCI-related _DSM evaluations in this
 * file (see acpi_evaluate_dsm_typed() callers below).
 *
 * The GUID is defined in the PCI Firmware Specification available
 * here to PCI-SIG members:
 * https://members.pcisig.com/wg/PCI-SIG/document/15350
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
  30. #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
  31. static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
  32. {
  33. struct device *dev = &adev->dev;
  34. struct resource_entry *entry;
  35. struct list_head list;
  36. unsigned long flags;
  37. int ret;
  38. INIT_LIST_HEAD(&list);
  39. flags = IORESOURCE_MEM;
  40. ret = acpi_dev_get_resources(adev, &list,
  41. acpi_dev_filter_resource_type_cb,
  42. (void *) flags);
  43. if (ret < 0) {
  44. dev_err(dev, "failed to parse _CRS method, error code %d\n",
  45. ret);
  46. return ret;
  47. }
  48. if (ret == 0) {
  49. dev_err(dev, "no IO and memory resources present in _CRS\n");
  50. return -EINVAL;
  51. }
  52. entry = list_first_entry(&list, struct resource_entry, node);
  53. *res = *entry->res;
  54. acpi_dev_free_resource_list(&list);
  55. return 0;
  56. }
  57. static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
  58. void **retval)
  59. {
  60. u16 *segment = context;
  61. unsigned long long uid;
  62. acpi_status status;
  63. status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid);
  64. if (ACPI_FAILURE(status) || uid != *segment)
  65. return AE_CTRL_DEPTH;
  66. *(acpi_handle *)retval = handle;
  67. return AE_CTRL_TERMINATE;
  68. }
  69. int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
  70. struct resource *res)
  71. {
  72. struct acpi_device *adev;
  73. acpi_status status;
  74. acpi_handle handle;
  75. int ret;
  76. status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
  77. if (ACPI_FAILURE(status)) {
  78. dev_err(dev, "can't find _HID %s device to locate resources\n",
  79. hid);
  80. return -ENODEV;
  81. }
  82. adev = acpi_fetch_acpi_dev(handle);
  83. if (!adev)
  84. return -ENODEV;
  85. ret = acpi_get_rc_addr(adev, res);
  86. if (ret) {
  87. dev_err(dev, "can't get resource from %s\n",
  88. dev_name(&adev->dev));
  89. return ret;
  90. }
  91. return 0;
  92. }
  93. #endif
  94. phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
  95. {
  96. acpi_status status = AE_NOT_EXIST;
  97. unsigned long long mcfg_addr;
  98. if (handle)
  99. status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
  100. NULL, &mcfg_addr);
  101. if (ACPI_FAILURE(status))
  102. return 0;
  103. return (phys_addr_t)mcfg_addr;
  104. }
  105. bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
  106. {
  107. bool ret = false;
  108. if (ACPI_HANDLE(&host_bridge->dev)) {
  109. union acpi_object *obj;
  110. /*
  111. * Evaluate the "PCI Boot Configuration" _DSM Function. If it
  112. * exists and returns 0, we must preserve any PCI resource
  113. * assignments made by firmware for this host bridge.
  114. */
  115. obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
  116. &pci_acpi_dsm_guid,
  117. 1, DSM_PCI_PRESERVE_BOOT_CONFIG,
  118. NULL, ACPI_TYPE_INTEGER);
  119. if (obj && obj->integer.value == 0)
  120. ret = true;
  121. ACPI_FREE(obj);
  122. }
  123. return ret;
  124. }
/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8 cache_line_size;	/* Not applicable to PCIe */
	u8 latency_timer;	/* Not applicable to PCIe */
	u8 enable_serr;		/* non-zero: set PCI_COMMAND_SERR */
	u8 enable_perr;		/* non-zero: set PCI_COMMAND_PARITY */
};
/*
 * Defaults used by program_hpx_type0() when firmware supplies no Type 0
 * record or one with an unsupported revision.
 */
static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};
  140. static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
  141. {
  142. u16 pci_cmd, pci_bctl;
  143. if (!hpx)
  144. hpx = &pci_default_type0;
  145. if (hpx->revision > 1) {
  146. pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
  147. hpx->revision);
  148. hpx = &pci_default_type0;
  149. }
  150. pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
  151. pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
  152. pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
  153. if (hpx->enable_serr)
  154. pci_cmd |= PCI_COMMAND_SERR;
  155. if (hpx->enable_perr)
  156. pci_cmd |= PCI_COMMAND_PARITY;
  157. pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
  158. /* Program bridge control value */
  159. if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
  160. pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
  161. hpx->latency_timer);
  162. pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
  163. if (hpx->enable_perr)
  164. pci_bctl |= PCI_BRIDGE_CTL_PARITY;
  165. pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
  166. }
  167. }
  168. static acpi_status decode_type0_hpx_record(union acpi_object *record,
  169. struct hpx_type0 *hpx0)
  170. {
  171. int i;
  172. union acpi_object *fields = record->package.elements;
  173. u32 revision = fields[1].integer.value;
  174. switch (revision) {
  175. case 1:
  176. if (record->package.count != 6)
  177. return AE_ERROR;
  178. for (i = 2; i < 6; i++)
  179. if (fields[i].type != ACPI_TYPE_INTEGER)
  180. return AE_ERROR;
  181. hpx0->revision = revision;
  182. hpx0->cache_line_size = fields[2].integer.value;
  183. hpx0->latency_timer = fields[3].integer.value;
  184. hpx0->enable_serr = fields[4].integer.value;
  185. hpx0->enable_perr = fields[5].integer.value;
  186. break;
  187. default:
  188. pr_warn("%s: Type 0 Revision %d record not supported\n",
  189. __func__, revision);
  190. return AE_ERROR;
  191. }
  192. return AE_OK;
  193. }
/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
	u32 revision;
	u8 max_mem_read;	/* decoded but currently unused (PCI-X unsupported) */
	u8 avg_max_split;	/* decoded but currently unused */
	u16 tot_max_split;	/* decoded but currently unused */
};
  201. static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
  202. {
  203. int pos;
  204. if (!hpx)
  205. return;
  206. pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
  207. if (!pos)
  208. return;
  209. pci_warn(dev, "PCI-X settings not supported\n");
  210. }
  211. static acpi_status decode_type1_hpx_record(union acpi_object *record,
  212. struct hpx_type1 *hpx1)
  213. {
  214. int i;
  215. union acpi_object *fields = record->package.elements;
  216. u32 revision = fields[1].integer.value;
  217. switch (revision) {
  218. case 1:
  219. if (record->package.count != 5)
  220. return AE_ERROR;
  221. for (i = 2; i < 5; i++)
  222. if (fields[i].type != ACPI_TYPE_INTEGER)
  223. return AE_ERROR;
  224. hpx1->revision = revision;
  225. hpx1->max_mem_read = fields[2].integer.value;
  226. hpx1->avg_max_split = fields[3].integer.value;
  227. hpx1->tot_max_split = fields[4].integer.value;
  228. break;
  229. default:
  230. pr_warn("%s: Type 1 Revision %d record not supported\n",
  231. __func__, revision);
  232. return AE_ERROR;
  233. }
  234. return AE_OK;
  235. }
  236. static bool pcie_root_rcb_set(struct pci_dev *dev)
  237. {
  238. struct pci_dev *rp = pcie_find_root_port(dev);
  239. u16 lnkctl;
  240. if (!rp)
  241. return false;
  242. pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
  243. if (lnkctl & PCI_EXP_LNKCTL_RCB)
  244. return true;
  245. return false;
  246. }
/*
 * _HPX PCI Express Setting Record (Type 2)
 *
 * Each register is programmed as (current & *_and) | *_or; see
 * program_hpx_type2().  The sec_* fields map to the Secondary
 * Uncorrectable Error registers, which are not programmed yet.
 */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;	/* PCI_ERR_UNCOR_MASK */
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;	/* PCI_ERR_UNCOR_SEVER */
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;	/* PCI_ERR_COR_MASK */
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;	/* PCI_ERR_CAP */
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;	/* PCI_EXP_DEVCTL */
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;	/* PCI_EXP_LNKCTL */
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;	/* not programmed yet */
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;	/* not programmed yet */
	u32 sec_unc_err_mask_or;
};
/*
 * program_hpx_type2 - apply a Type 2 (PCI Express) _HPX record to @dev
 * @dev: PCIe device to program (non-PCIe devices are ignored)
 * @hpx: record to apply; NULL is a no-op
 *
 * Programs Device Control, Link Control and (if present) the AER
 * registers using the record's AND/OR masks.
 */
static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {
		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
  337. static acpi_status decode_type2_hpx_record(union acpi_object *record,
  338. struct hpx_type2 *hpx2)
  339. {
  340. int i;
  341. union acpi_object *fields = record->package.elements;
  342. u32 revision = fields[1].integer.value;
  343. switch (revision) {
  344. case 1:
  345. if (record->package.count != 18)
  346. return AE_ERROR;
  347. for (i = 2; i < 18; i++)
  348. if (fields[i].type != ACPI_TYPE_INTEGER)
  349. return AE_ERROR;
  350. hpx2->revision = revision;
  351. hpx2->unc_err_mask_and = fields[2].integer.value;
  352. hpx2->unc_err_mask_or = fields[3].integer.value;
  353. hpx2->unc_err_sever_and = fields[4].integer.value;
  354. hpx2->unc_err_sever_or = fields[5].integer.value;
  355. hpx2->cor_err_mask_and = fields[6].integer.value;
  356. hpx2->cor_err_mask_or = fields[7].integer.value;
  357. hpx2->adv_err_cap_and = fields[8].integer.value;
  358. hpx2->adv_err_cap_or = fields[9].integer.value;
  359. hpx2->pci_exp_devctl_and = fields[10].integer.value;
  360. hpx2->pci_exp_devctl_or = fields[11].integer.value;
  361. hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
  362. hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
  363. hpx2->sec_unc_err_sever_and = fields[14].integer.value;
  364. hpx2->sec_unc_err_sever_or = fields[15].integer.value;
  365. hpx2->sec_unc_err_mask_and = fields[16].integer.value;
  366. hpx2->sec_unc_err_mask_or = fields[17].integer.value;
  367. break;
  368. default:
  369. pr_warn("%s: Type 2 Revision %d record not supported\n",
  370. __func__, revision);
  371. return AE_ERROR;
  372. }
  373. return AE_OK;
  374. }
/*
 * _HPX PCI Express Setting Record (Type 3)
 *
 * Describes one register write: locate a config-space region
 * (config_space_location + capability IDs), check that the dword at
 * match_offset masked with match_mask_and equals match_value, then
 * update the dword at reg_offset as (val & reg_mask_and) | reg_mask_or.
 * See program_hpx_type3_register().
 */
struct hpx_type3 {
	u16 device_type;	/* bitmask of enum hpx_type3_dev_type */
	u16 function_type;	/* bitmask of enum hpx_type3_fn_type */
	u16 config_space_location;	/* enum hpx_type3_cfg_loc */
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;	/* currently unused (VEND_CAP unsupported) */
	u16 dvsec_id;		/* currently unused (DVSEC unsupported) */
	u16 dvsec_rev;		/* currently unused */
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};
/* Bit positions for hpx_type3.device_type; see hpx3_device_type() */
enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};
  403. static u16 hpx3_device_type(struct pci_dev *dev)
  404. {
  405. u16 pcie_type = pci_pcie_type(dev);
  406. static const int pcie_to_hpx3_type[] = {
  407. [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
  408. [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
  409. [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
  410. [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC,
  411. [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT,
  412. [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM,
  413. [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM,
  414. [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE,
  415. [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
  416. };
  417. if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
  418. return 0;
  419. return pcie_to_hpx3_type[pcie_type];
  420. }
/* Bit positions for hpx_type3.function_type; see hpx3_function_type() */
enum hpx_type3_fn_type {
	HPX_FN_NORMAL		= BIT(0),
	HPX_FN_SRIOV_PHYS	= BIT(1),
	HPX_FN_SRIOV_VIRT	= BIT(2),
};
  426. static u8 hpx3_function_type(struct pci_dev *dev)
  427. {
  428. if (dev->is_virtfn)
  429. return HPX_FN_SRIOV_VIRT;
  430. else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
  431. return HPX_FN_SRIOV_PHYS;
  432. else
  433. return HPX_FN_NORMAL;
  434. }
  435. static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
  436. {
  437. u8 cap_ver = hpx3_cap_id & 0xf;
  438. if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
  439. return true;
  440. else if (cap_ver == pcie_cap_id)
  441. return true;
  442. return false;
  443. }
/*
 * Config-space locations a Type 3 record may target.  Only PCICFG,
 * PCIE_CAP and PCIE_CAP_EXT are handled; VEND_CAP and DVSEC are
 * rejected in program_hpx_type3_register().
 */
enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,
	HPX_CFG_PCIE_CAP	= 1,
	HPX_CFG_PCIE_CAP_EXT	= 2,
	HPX_CFG_VEND_CAP	= 3,
	HPX_CFG_DVSEC		= 4,
	HPX_CFG_MAX,
};
  452. static void program_hpx_type3_register(struct pci_dev *dev,
  453. const struct hpx_type3 *reg)
  454. {
  455. u32 match_reg, write_reg, header, orig_value;
  456. u16 pos;
  457. if (!(hpx3_device_type(dev) & reg->device_type))
  458. return;
  459. if (!(hpx3_function_type(dev) & reg->function_type))
  460. return;
  461. switch (reg->config_space_location) {
  462. case HPX_CFG_PCICFG:
  463. pos = 0;
  464. break;
  465. case HPX_CFG_PCIE_CAP:
  466. pos = pci_find_capability(dev, reg->pci_exp_cap_id);
  467. if (pos == 0)
  468. return;
  469. break;
  470. case HPX_CFG_PCIE_CAP_EXT:
  471. pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
  472. if (pos == 0)
  473. return;
  474. pci_read_config_dword(dev, pos, &header);
  475. if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
  476. reg->pci_exp_cap_ver))
  477. return;
  478. break;
  479. case HPX_CFG_VEND_CAP:
  480. case HPX_CFG_DVSEC:
  481. default:
  482. pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
  483. return;
  484. }
  485. pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
  486. if ((match_reg & reg->match_mask_and) != reg->match_value)
  487. return;
  488. pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
  489. orig_value = write_reg;
  490. write_reg &= reg->reg_mask_and;
  491. write_reg |= reg->reg_mask_or;
  492. if (orig_value == write_reg)
  493. return;
  494. pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
  495. pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
  496. pos, orig_value, write_reg);
  497. }
  498. static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
  499. {
  500. if (!hpx)
  501. return;
  502. if (!pci_is_pcie(dev))
  503. return;
  504. program_hpx_type3_register(dev, hpx);
  505. }
  506. static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
  507. union acpi_object *reg_fields)
  508. {
  509. hpx3_reg->device_type = reg_fields[0].integer.value;
  510. hpx3_reg->function_type = reg_fields[1].integer.value;
  511. hpx3_reg->config_space_location = reg_fields[2].integer.value;
  512. hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
  513. hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
  514. hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
  515. hpx3_reg->dvsec_id = reg_fields[6].integer.value;
  516. hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
  517. hpx3_reg->match_offset = reg_fields[8].integer.value;
  518. hpx3_reg->match_mask_and = reg_fields[9].integer.value;
  519. hpx3_reg->match_value = reg_fields[10].integer.value;
  520. hpx3_reg->reg_offset = reg_fields[11].integer.value;
  521. hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
  522. hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
  523. }
  524. static acpi_status program_type3_hpx_record(struct pci_dev *dev,
  525. union acpi_object *record)
  526. {
  527. union acpi_object *fields = record->package.elements;
  528. u32 desc_count, expected_length, revision;
  529. union acpi_object *reg_fields;
  530. struct hpx_type3 hpx3;
  531. int i;
  532. revision = fields[1].integer.value;
  533. switch (revision) {
  534. case 1:
  535. desc_count = fields[2].integer.value;
  536. expected_length = 3 + desc_count * 14;
  537. if (record->package.count != expected_length)
  538. return AE_ERROR;
  539. for (i = 2; i < expected_length; i++)
  540. if (fields[i].type != ACPI_TYPE_INTEGER)
  541. return AE_ERROR;
  542. for (i = 0; i < desc_count; i++) {
  543. reg_fields = fields + 3 + i * 14;
  544. parse_hpx3_register(&hpx3, reg_fields);
  545. program_hpx_type3(dev, &hpx3);
  546. }
  547. break;
  548. default:
  549. printk(KERN_WARNING
  550. "%s: Type 3 Revision %d record not supported\n",
  551. __func__, revision);
  552. return AE_ERROR;
  553. }
  554. return AE_OK;
  555. }
/*
 * acpi_run_hpx - evaluate _HPX below @handle and program @dev with every
 * setting record it returns
 * @dev:    device to program
 * @handle: ACPI handle on which to evaluate _HPX
 *
 * _HPX returns a package of setting-record packages; element 0 of each
 * record is its type (0-3), element 1 its revision.  Returns the first
 * failure status, or AE_OK when all records were applied.
 */
static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpx_type0 hpx0;
	struct hpx_type1 hpx1;
	struct hpx_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		/* Every record starts with integer type and revision */
		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type2(dev, &hpx2);
			break;
		case 3:
			/* Type 3 decodes and programs in one pass */
			status = program_type3_hpx_record(dev, record);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
 exit:
	kfree(buffer.pointer);
	return status;
}
  625. static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
  626. {
  627. acpi_status status;
  628. struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  629. union acpi_object *package, *fields;
  630. struct hpx_type0 hpx0;
  631. int i;
  632. memset(&hpx0, 0, sizeof(hpx0));
  633. status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
  634. if (ACPI_FAILURE(status))
  635. return status;
  636. package = (union acpi_object *) buffer.pointer;
  637. if (package->type != ACPI_TYPE_PACKAGE ||
  638. package->package.count != 4) {
  639. status = AE_ERROR;
  640. goto exit;
  641. }
  642. fields = package->package.elements;
  643. for (i = 0; i < 4; i++) {
  644. if (fields[i].type != ACPI_TYPE_INTEGER) {
  645. status = AE_ERROR;
  646. goto exit;
  647. }
  648. }
  649. hpx0.revision = 1;
  650. hpx0.cache_line_size = fields[0].integer.value;
  651. hpx0.latency_timer = fields[1].integer.value;
  652. hpx0.enable_serr = fields[2].integer.value;
  653. hpx0.enable_perr = fields[3].integer.value;
  654. program_hpx_type0(dev, &hpx0);
  655. exit:
  656. kfree(buffer.pointer);
  657. return status;
  658. }
/**
 * pci_acpi_program_hp_params - program hotplug parameters for a PCI device
 * @dev: the pci_dev for which we want parameters
 *
 * Walks up from @dev looking for an ACPI bridge handle, then evaluates
 * _HPX (preferred) or _HPP on that handle and its ancestors until one
 * succeeds or the root bridge is reached.
 *
 * Returns 0 when settings were applied, -ENODEV otherwise.
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	/* Find the nearest ancestor bus with an ACPI bridge handle */
	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered. If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}
  698. /**
  699. * pciehp_is_native - Check whether a hotplug port is handled by the OS
  700. * @bridge: Hotplug port to check
  701. *
  702. * Returns true if the given @bridge is handled by the native PCIe hotplug
  703. * driver.
  704. */
  705. bool pciehp_is_native(struct pci_dev *bridge)
  706. {
  707. const struct pci_host_bridge *host;
  708. if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
  709. return false;
  710. if (!bridge->is_pciehp)
  711. return false;
  712. if (pcie_ports_native)
  713. return true;
  714. host = pci_find_host_bridge(bridge->bus);
  715. return host->native_pcie_hotplug;
  716. }
/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}
  728. /**
  729. * pci_acpi_wake_bus - Root bus wakeup notification fork function.
  730. * @context: Device wakeup context.
  731. */
  732. static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
  733. {
  734. struct acpi_device *adev;
  735. struct acpi_pci_root *root;
  736. adev = container_of(context, struct acpi_device, wakeup.context);
  737. root = acpi_driver_data(adev);
  738. pci_pme_wakeup_bus(root->bus);
  739. }
/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 *
 * Handle an ACPI wakeup notification for a single PCI device: report a
 * wakeup event and request a runtime resume.  For devices in D3cold the
 * config space is inaccessible, so PME status cannot be touched and no
 * subordinate bus walk is done.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	/* The platform signals PME natively; polling is unnecessary now. */
	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		/* Config space unavailable in D3cold: just wake and resume. */
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	/* The event may really target a device below this bridge. */
	pci_pme_wakeup_bus(pci_dev->subordinate);
}
/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 *
 * Arrange for pci_acpi_wake_bus() to run when a wakeup notification
 * arrives for @dev.  Returns an ACPI status code.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}
/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 *
 * Arrange for pci_acpi_wake_dev() to run when a wakeup notification
 * arrives for @dev.  Returns an ACPI status code.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}
  780. /*
  781. * _SxD returns the D-state with the highest power
  782. * (lowest D-state number) supported in the S-state "x".
  783. *
  784. * If the devices does not have a _PRW
  785. * (Power Resources for Wake) supporting system wakeup from "x"
  786. * then the OS is free to choose a lower power (higher number
  787. * D-state) than the return value from _SxD.
  788. *
  789. * But if _PRW is enabled at S-state "x", the OS
  790. * must not choose a power lower than _SxD --
  791. * unless the device has an _SxW method specifying
  792. * the lowest power (highest D-state number) the device
  793. * may enter while still able to wake the system.
  794. *
  795. * ie. depending on global OS policy:
  796. *
  797. * if (_PRW at S-state x)
  798. * choose from highest power _SxD to lowest power _SxW
  799. * else // no _PRW at S-state x
  800. * choose highest power _SxD or any lower power
  801. */
  802. pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
  803. {
  804. int acpi_state, d_max;
  805. if (pdev->no_d3cold || !pdev->d3cold_allowed)
  806. d_max = ACPI_STATE_D3_HOT;
  807. else
  808. d_max = ACPI_STATE_D3_COLD;
  809. acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
  810. if (acpi_state < 0)
  811. return PCI_POWER_ERROR;
  812. switch (acpi_state) {
  813. case ACPI_STATE_D0:
  814. return PCI_D0;
  815. case ACPI_STATE_D1:
  816. return PCI_D1;
  817. case ACPI_STATE_D2:
  818. return PCI_D2;
  819. case ACPI_STATE_D3_HOT:
  820. return PCI_D3hot;
  821. case ACPI_STATE_D3_COLD:
  822. return PCI_D3cold;
  823. }
  824. return PCI_POWER_ERROR;
  825. }
static struct acpi_device *acpi_pci_find_companion(struct device *dev);

/*
 * Set the ACPI companion as the fwnode of @dev, unless the device already
 * has a fwnode or has already been added to the device hierarchy.
 */
void pci_set_acpi_fwnode(struct pci_dev *dev)
{
	if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
		ACPI_COMPANION_SET(&dev->dev,
				   acpi_pci_find_companion(&dev->dev));
}
  833. /**
  834. * pci_dev_acpi_reset - do a function level reset using _RST method
  835. * @dev: device to reset
  836. * @probe: if true, return 0 if device supports _RST
  837. */
  838. int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
  839. {
  840. acpi_handle handle = ACPI_HANDLE(&dev->dev);
  841. if (!handle || !acpi_has_method(handle, "_RST"))
  842. return -ENOTTY;
  843. if (probe)
  844. return 0;
  845. if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
  846. pci_warn(dev, "ACPI _RST failed\n");
  847. return -ENOTTY;
  848. }
  849. return 0;
  850. }
  851. bool acpi_pci_power_manageable(struct pci_dev *dev)
  852. {
  853. struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
  854. return adev && acpi_device_power_manageable(adev);
  855. }
/*
 * acpi_pci_bridge_d3 - Whether a hotplug bridge is allowed to go into D3.
 *
 * A hotplug bridge may only enter D3 if hotplug events can still be
 * signaled from low-power states; otherwise devices plugged in while the
 * bridge sleeps would go unnoticed.  Decided from the bridge's own ACPI
 * wake capabilities and, failing that, from its Root Port's _PRW/_S0W and
 * the "HotPlugSupportInD3" _DSD property.
 */
bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	struct pci_dev *rpdev;
	struct acpi_device *adev, *rpadev;
	const union acpi_object *obj;

	if (acpi_pci_disabled || !dev->is_hotplug_bridge)
		return false;

	adev = ACPI_COMPANION(&dev->dev);
	if (adev) {
		/*
		 * If the bridge has _S0W, whether or not it can go into D3
		 * depends on what is returned by that object. In particular,
		 * if the power state returned by _S0W is D2 or shallower,
		 * entering D3 should not be allowed.
		 */
		if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
			return false;

		/*
		 * Otherwise, assume that the bridge can enter D3 so long as it
		 * is power-manageable via ACPI.
		 */
		if (acpi_device_power_manageable(adev))
			return true;
	}

	rpdev = pcie_find_root_port(dev);
	if (!rpdev)
		return false;

	/* Reuse the companion already looked up when @dev is the Root Port. */
	if (rpdev == dev)
		rpadev = adev;
	else
		rpadev = ACPI_COMPANION(&rpdev->dev);

	if (!rpadev)
		return false;

	/*
	 * If the Root Port cannot signal wakeup signals at all, i.e., it
	 * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
	 * events from low-power states including D3hot and D3cold.
	 */
	if (!rpadev->wakeup.flags.valid)
		return false;

	/*
	 * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port
	 * to verify whether or not it can signal wakeup from D3.
	 */
	if (rpadev != adev &&
	    acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
		return false;

	/*
	 * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
	 * the Port can signal hotplug events while in D3. We assume any
	 * bridges *below* that Root Port can also signal hotplug events
	 * while in D3.
	 */
	if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
				   ACPI_TYPE_INTEGER, &obj) &&
	    obj->integer.value == 1)
		return true;

	return false;
}
  915. static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
  916. {
  917. int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
  918. int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
  919. ACPI_ADR_SPACE_PCI_CONFIG, val);
  920. if (ret)
  921. pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
  922. enable ? "connect" : "disconnect", ret);
  923. }
/*
 * acpi_pci_set_power_state - Put @dev into the ACPI D-state matching the
 * requested PCI power state @state.
 *
 * Returns 0 on success, -ENODEV when @dev has no usable ACPI companion,
 * -EINVAL for an unsupported @state, -EBUSY when PM QoS forbids powering
 * off, or the error from acpi_device_set_power().
 */
int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	/* PCI power state -> ACPI D-state mapping. */
	static const u8 state_conv[] = {
		[PCI_D0] = ACPI_STATE_D0,
		[PCI_D1] = ACPI_STATE_D1,
		[PCI_D2] = ACPI_STATE_D2,
		[PCI_D3hot] = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
	case PCI_D3cold:
		break;
	default:
		return -EINVAL;
	}

	if (state == PCI_D3cold) {
		/* Honor a PM QoS "no power off" constraint on this device. */
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
		    PM_QOS_FLAGS_ALL)
			return -EBUSY;

		/* Notify AML lack of PCI config space availability */
		acpi_pci_config_space_access(dev, false);
	}

	error = acpi_device_set_power(adev, state_conv[state]);
	if (error)
		return error;

	pci_dbg(dev, "power state changed by ACPI to %s\n",
		acpi_power_state_string(adev->power.state));

	/*
	 * Notify AML of PCI config space availability. Config space is
	 * accessible in all states except D3cold; the only transitions
	 * that change availability are transitions to D3cold and from
	 * D3cold to D0.
	 */
	if (state == PCI_D0)
		acpi_pci_config_space_access(dev, true);

	return 0;
}
  970. pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
  971. {
  972. struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
  973. static const pci_power_t state_conv[] = {
  974. [ACPI_STATE_D0] = PCI_D0,
  975. [ACPI_STATE_D1] = PCI_D1,
  976. [ACPI_STATE_D2] = PCI_D2,
  977. [ACPI_STATE_D3_HOT] = PCI_D3hot,
  978. [ACPI_STATE_D3_COLD] = PCI_D3cold,
  979. };
  980. int state;
  981. if (!adev || !acpi_device_power_manageable(adev))
  982. return PCI_UNKNOWN;
  983. state = adev->power.state;
  984. if (state == ACPI_STATE_UNKNOWN)
  985. return PCI_UNKNOWN;
  986. return state_conv[state];
  987. }
  988. void acpi_pci_refresh_power_state(struct pci_dev *dev)
  989. {
  990. struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
  991. if (adev && acpi_device_power_manageable(adev))
  992. acpi_device_update_power(adev, NULL);
  993. }
  994. static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
  995. {
  996. while (bus->parent) {
  997. if (acpi_pm_device_can_wakeup(&bus->self->dev))
  998. return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
  999. bus = bus->parent;
  1000. }
  1001. /* We have reached the root bus. */
  1002. if (bus->bridge) {
  1003. if (acpi_pm_device_can_wakeup(bus->bridge))
  1004. return acpi_pm_set_device_wakeup(bus->bridge, enable);
  1005. }
  1006. return 0;
  1007. }
  1008. int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
  1009. {
  1010. if (acpi_pci_disabled)
  1011. return 0;
  1012. if (acpi_pm_device_can_wakeup(&dev->dev))
  1013. return acpi_pm_set_device_wakeup(&dev->dev, enable);
  1014. return acpi_pci_propagate_wakeup(dev->bus, enable);
  1015. }
/*
 * acpi_pci_need_resume - Whether @dev must be resumed during system
 * suspend/resume rather than left in a low-power state.
 */
bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev;

	if (acpi_pci_disabled)
		return false;

	/*
	 * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
	 * system-wide suspend/resume confuses the platform firmware, so avoid
	 * doing that. According to Section 16.1.6 of ACPI 6.2, endpoint
	 * devices are expected to be in D3 before invoking the S3 entry path
	 * from the firmware, so they should not be affected by this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	adev = ACPI_COMPANION(&dev->dev);
	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	/*
	 * Resume if the desired wakeup setting differs from what was
	 * configured on the companion (prepare_count tracks enables).
	 */
	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	/* _DSW present: the device needs a resume to re-evaluate it. */
	return !!adev->power.flags.dsw_present;
}
/*
 * acpi_pci_add_bus - ACPI hook run when @bus is added: enumerate ACPI
 * slots and hotplug slots, and for root buses evaluate the host bridge
 * _DSM to learn whether power-on reset delays can be skipped.
 */
void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
				      DSM_PCI_POWER_ON_RESET_DELAY, NULL, ACPI_TYPE_INTEGER);
	if (!obj)
		return;

	/* A return of 1 means firmware already waited out reset delays. */
	if (obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}

	ACPI_FREE(obj);
}
  1064. void acpi_pci_remove_bus(struct pci_bus *bus)
  1065. {
  1066. if (acpi_pci_disabled || !bus->bridge)
  1067. return;
  1068. acpiphp_remove_slots(bus);
  1069. acpi_pci_slot_remove(bus);
  1070. }
/* ACPI bus type */

/* Protects installation and clearing of the companion lookup hook below. */
static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);

/* Optional platform-specific ACPI companion lookup callback, or NULL. */
static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);
  1074. /**
  1075. * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
  1076. * @func: ACPI companion lookup callback pointer or NULL.
  1077. *
  1078. * Set a special ACPI companion lookup callback for PCI devices whose companion
  1079. * objects in the ACPI namespace have _ADR with non-standard bus-device-function
  1080. * encodings.
  1081. *
  1082. * Return 0 on success or a negative error code on failure (in which case no
  1083. * changes are made).
  1084. *
  1085. * The caller is responsible for the appropriate ordering of the invocations of
  1086. * this function with respect to the enumeration of the PCI devices needing the
  1087. * callback installed by it.
  1088. */
  1089. int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
  1090. {
  1091. int ret;
  1092. if (!func)
  1093. return -EINVAL;
  1094. down_write(&pci_acpi_companion_lookup_sem);
  1095. if (pci_acpi_find_companion_hook) {
  1096. ret = -EBUSY;
  1097. } else {
  1098. pci_acpi_find_companion_hook = func;
  1099. ret = 0;
  1100. }
  1101. up_write(&pci_acpi_companion_lookup_sem);
  1102. return ret;
  1103. }
  1104. EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);
/**
 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
 *
 * Clear the special ACPI companion lookup callback previously set by
 * pci_acpi_set_companion_lookup_hook(). Block until the last running instance
 * of the callback returns before clearing it.
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback cleared by it.
 */
void pci_acpi_clear_companion_lookup_hook(void)
{
	/* Writer lock waits out any lookup currently running the hook. */
	down_write(&pci_acpi_companion_lookup_sem);

	pci_acpi_find_companion_hook = NULL;

	up_write(&pci_acpi_companion_lookup_sem);
}
EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);
/*
 * acpi_pci_find_companion - Locate the ACPI namespace object for @dev.
 *
 * Tries the platform-installed lookup hook first, then falls back to a
 * standard _ADR-based child search under the parent's companion.
 */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev;
	bool check_children;
	u64 addr;

	if (!dev->parent)
		return NULL;

	down_read(&pci_acpi_companion_lookup_sem);

	adev = pci_acpi_find_companion_hook ?
		pci_acpi_find_companion_hook(pci_dev) : NULL;

	up_read(&pci_acpi_companion_lookup_sem);

	if (adev)
		return adev;

	check_children = pci_is_bridge(pci_dev);
	/* Please ref to ACPI spec for the syntax of _ADR */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);

	/*
	 * There may be ACPI device objects in the ACPI namespace that are
	 * children of the device object representing the host bridge, but don't
	 * represent PCI devices. Both _HID and _ADR may be present for them,
	 * even though that is against the specification (for example, see
	 * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which
	 * appears to indicate that they should not be taken into consideration
	 * as potential companions of PCI devices on the root bus.
	 *
	 * To catch this special case, disregard the returned device object if
	 * it has a valid _HID, addr is 0 and the PCI device at hand is on the
	 * root bus.
	 */
	if (adev && adev->pnp.type.platform_id && !addr &&
	    pci_is_root_bus(pci_dev->bus))
		return NULL;

	return adev;
}
/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge. If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located. It returns delay durations required after various
 * events if the device requires less time than the spec requires. Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	/* Function 8 result was cached on the host bridge at bus-add time. */
	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm_typed(handle, &pci_acpi_dsm_guid, 3,
				      DSM_PCI_DEVICE_READINESS_DURATIONS, NULL,
				      ACPI_TYPE_PACKAGE);
	if (!obj)
		return;

	/* The Durations package is expected to hold exactly five entries. */
	if (obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			/* Durations appear to be in microseconds; scale to ms. */
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3HOT_WAIT)
				pdev->d3hot_delay = value;
		}
	}
	ACPI_FREE(obj);
}
  1208. static void pci_acpi_set_external_facing(struct pci_dev *dev)
  1209. {
  1210. u8 val;
  1211. if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
  1212. return;
  1213. if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
  1214. return;
  1215. /*
  1216. * These root ports expose PCIe (including DMA) outside of the
  1217. * system. Everything downstream from them is external.
  1218. */
  1219. if (val)
  1220. dev->external_facing = 1;
  1221. }
/*
 * pci_acpi_setup - Bind ACPI power management to a PCI device.
 *
 * Run once @dev is associated with the ACPI companion @adev: optimize the
 * reset delays, mark external-facing Root Ports, and wire up EDR and PM
 * wakeup notifiers before configuring wakeup capabilities.
 */
void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_external_facing(pci_dev);
	pci_acpi_add_edr_notifier(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case). The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);

	if (pci_is_bridge(pci_dev))
		acpi_dev_power_up_children_with_adr(adev);
}
  1245. void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
  1246. {
  1247. struct pci_dev *pci_dev = to_pci_dev(dev);
  1248. pci_acpi_remove_edr_notifier(pci_dev);
  1249. pci_acpi_remove_pm_notifier(adev);
  1250. if (adev->wakeup.flags.valid) {
  1251. acpi_device_power_remove_dependent(adev, dev);
  1252. if (pci_dev->bridge_d3)
  1253. device_wakeup_disable(dev);
  1254. device_set_wakeup_capable(dev, false);
  1255. }
  1256. }
/* Callback supplied by the irqchip driver to map a device to its MSI fwnode. */
static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * This should be called by irqchip driver, which is the parent of
 * the MSI domain to provide callback interface to query fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}
  1271. /**
  1272. * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
  1273. * @bus: The PCI host bridge bus.
  1274. *
  1275. * This function uses the callback function registered by
  1276. * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
  1277. * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
  1278. * This returns NULL on error or when the domain is not found.
  1279. */
  1280. struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
  1281. {
  1282. struct fwnode_handle *fwnode;
  1283. if (!pci_msi_get_fwnode_cb)
  1284. return NULL;
  1285. fwnode = pci_msi_get_fwnode_cb(&bus->dev);
  1286. if (!fwnode)
  1287. return NULL;
  1288. return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
  1289. }
/*
 * acpi_pci_init - Early boot setup driven by FADT flags: disable MSI
 * and/or ASPM when firmware says they are unsupported, then initialize
 * the ACPI PCI slot and hotplug subsystems unless ACPI PCI is disabled.
 */
static int __init acpi_pci_init(void)
{
	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
		pcie_no_aspm();
	}

	if (acpi_pci_disabled)
		return 0;

	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);
  1307. #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
  1308. /*
  1309. * Try to assign the IRQ number when probing a new device
  1310. */
  1311. int pcibios_alloc_irq(struct pci_dev *dev)
  1312. {
  1313. if (!acpi_disabled)
  1314. acpi_pci_irq_enable(dev);
  1315. return 0;
  1316. }
/* Per-root-bridge state: common ACPI root info plus its ECAM mapping. */
struct acpi_pci_generic_root_info {
	struct acpi_pci_root_info common;
	struct pci_config_window *cfg;	/* config space mapping */
};
  1321. int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
  1322. {
  1323. struct pci_config_window *cfg = bus->sysdata;
  1324. struct acpi_device *adev = to_acpi_device(cfg->parent);
  1325. struct acpi_pci_root *root = acpi_driver_data(adev);
  1326. return root->segment;
  1327. }
/*
 * pcibios_root_bridge_prepare - Associate the host bridge with its ACPI
 * companion and set the NUMA node of the root bus device accordingly.
 */
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct pci_config_window *cfg;
	struct acpi_device *adev;
	struct device *bus_dev;

	if (acpi_disabled)
		return 0;

	cfg = bridge->bus->sysdata;

	/*
	 * On Hyper-V there is no corresponding ACPI device for a root bridge,
	 * therefore ->parent is set as NULL by the driver. And set 'adev' as
	 * NULL in this case because there is no proper ACPI device.
	 */
	if (!cfg->parent)
		adev = NULL;
	else
		adev = to_acpi_device(cfg->parent);

	bus_dev = &bridge->bus->dev;

	ACPI_COMPANION_SET(&bridge->dev, adev);
	set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));

	return 0;
}
/*
 * Probe the root bridge resources and drop everything that is not a
 * bridge window, since only windows are handed to the PCI core.
 */
static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
	struct resource_entry *entry, *tmp;
	int status;

	status = acpi_pci_probe_root_resources(ci);

	/* Safe iteration: entries may be destroyed while walking the list. */
	resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
		if (!(entry->res->flags & IORESOURCE_WINDOW))
			resource_list_destroy_entry(entry);
	}

	return status;
}
/*
 * Lookup the bus range for the domain in MCFG, and set up config space
 * mapping.
 *
 * Returns the created config window, or NULL on MCFG lookup or mapping
 * failure (errors are logged on the root bridge device).
 */
static struct pci_config_window *
pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
{
	struct device *dev = &root->device->dev;
	struct resource *bus_res = &root->secondary;
	u16 seg = root->segment;
	const struct pci_ecam_ops *ecam_ops;
	struct resource cfgres;
	struct acpi_device *adev;
	struct pci_config_window *cfg;
	int ret;

	ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
	if (ret) {
		dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
		return NULL;
	}

	/* Warn (FW_BUG) when firmware failed to reserve the ECAM area. */
	adev = acpi_resource_consumer(&cfgres);
	if (adev)
		dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
			 dev_name(&adev->dev));
	else
		dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
			 &cfgres);

	cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
	if (IS_ERR(cfg)) {
		dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
			PTR_ERR(cfg));
		return NULL;
	}

	return cfg;
}
  1396. /* release_info: free resources allocated by init_info */
  1397. static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
  1398. {
  1399. struct acpi_pci_generic_root_info *ri;
  1400. ri = container_of(ci, struct acpi_pci_generic_root_info, common);
  1401. pci_ecam_free(ri->cfg);
  1402. kfree(ci->ops);
  1403. kfree(ri);
  1404. }
/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_pci_generic_root_info *ri;
	struct pci_bus *bus, *child;
	struct acpi_pci_root_ops *root_ops;
	struct pci_host_bridge *host;

	ri = kzalloc(sizeof(*ri), GFP_KERNEL);
	if (!ri)
		return NULL;

	root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
	if (!root_ops) {
		kfree(ri);
		return NULL;
	}

	ri->cfg = pci_acpi_setup_ecam_mapping(root);
	if (!ri->cfg) {
		kfree(ri);
		kfree(root_ops);
		return NULL;
	}

	root_ops->release_info = pci_acpi_generic_release_info;
	root_ops->prepare_resources = pci_acpi_root_prepare_resources;
	root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;

	/*
	 * Ownership of ri/root_ops passes here; on failure they are
	 * released via root_ops->release_info.
	 */
	bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
	if (!bus)
		return NULL;

	/* If we must preserve the resource configuration, claim now */
	host = pci_find_host_bridge(bus);
	if (host->preserve_config)
		pci_bus_claim_resources(bus);

	/*
	 * Assign whatever was left unassigned. If we didn't claim above,
	 * this will reassign everything.
	 */
	pci_assign_unassigned_root_bus_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	return bus;
}
/* Arch hook: run ACPI-side enumeration when a PCI bus is added. */
void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}
/* Arch hook: run ACPI-side teardown when a PCI bus is removed. */
void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}
  1453. #endif