pci-driver.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
  4. * (C) Copyright 2007 Novell Inc.
  5. */
  6. #include <linux/pci.h>
  7. #include <linux/module.h>
  8. #include <linux/init.h>
  9. #include <linux/device.h>
  10. #include <linux/mempolicy.h>
  11. #include <linux/string.h>
  12. #include <linux/slab.h>
  13. #include <linux/sched.h>
  14. #include <linux/cpu.h>
  15. #include <linux/pm_runtime.h>
  16. #include <linux/suspend.h>
  17. #include <linux/kexec.h>
  18. #include <linux/of_device.h>
  19. #include <linux/acpi.h>
  20. #include "pci.h"
  21. #include "pcie/portdrv.h"
  22. struct pci_dynid {
  23. struct list_head node;
  24. struct pci_device_id id;
  25. };
  26. /**
  27. * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
  28. * @drv: target pci driver
  29. * @vendor: PCI vendor ID
  30. * @device: PCI device ID
  31. * @subvendor: PCI subvendor ID
  32. * @subdevice: PCI subdevice ID
  33. * @class: PCI class
  34. * @class_mask: PCI class mask
  35. * @driver_data: private driver data
  36. *
  37. * Adds a new dynamic pci device ID to this driver and causes the
  38. * driver to probe for all devices again. @drv must have been
  39. * registered prior to calling this function.
  40. *
  41. * CONTEXT:
  42. * Does GFP_KERNEL allocation.
  43. *
  44. * RETURNS:
  45. * 0 on success, -errno on failure.
  46. */
  47. int pci_add_dynid(struct pci_driver *drv,
  48. unsigned int vendor, unsigned int device,
  49. unsigned int subvendor, unsigned int subdevice,
  50. unsigned int class, unsigned int class_mask,
  51. unsigned long driver_data)
  52. {
  53. struct pci_dynid *dynid;
  54. dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
  55. if (!dynid)
  56. return -ENOMEM;
  57. dynid->id.vendor = vendor;
  58. dynid->id.device = device;
  59. dynid->id.subvendor = subvendor;
  60. dynid->id.subdevice = subdevice;
  61. dynid->id.class = class;
  62. dynid->id.class_mask = class_mask;
  63. dynid->id.driver_data = driver_data;
  64. spin_lock(&drv->dynids.lock);
  65. list_add_tail(&dynid->node, &drv->dynids.list);
  66. spin_unlock(&drv->dynids.lock);
  67. return driver_attach(&drv->driver);
  68. }
  69. EXPORT_SYMBOL_GPL(pci_add_dynid);
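/*
 * Usage sketch (illustrative only, not part of this file): a driver that has
 * already registered a hypothetical "foo_pci_driver" could teach it about an
 * additional device at runtime roughly like this; the 0x1234/0x5678 IDs are
 * made up.
 *
 *	int err = pci_add_dynid(&foo_pci_driver, 0x1234, 0x5678,
 *				PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
 *	if (err)
 *		pr_warn("foo: adding dynamic ID failed: %d\n", err);
 */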
  70. static void pci_free_dynids(struct pci_driver *drv)
  71. {
  72. struct pci_dynid *dynid, *n;
  73. spin_lock(&drv->dynids.lock);
  74. list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
  75. list_del(&dynid->node);
  76. kfree(dynid);
  77. }
  78. spin_unlock(&drv->dynids.lock);
  79. }
  80. /**
  81. * new_id_store - sysfs frontend to pci_add_dynid()
  82. * @driver: target device driver
  83. * @buf: buffer for scanning device ID data
  84. * @count: input size
  85. *
  86. * Allow PCI IDs to be added to an existing driver via sysfs.
  87. */
  88. static ssize_t new_id_store(struct device_driver *driver, const char *buf,
  89. size_t count)
  90. {
  91. struct pci_driver *pdrv = to_pci_driver(driver);
  92. const struct pci_device_id *ids = pdrv->id_table;
  93. __u32 vendor, device, subvendor = PCI_ANY_ID,
  94. subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
  95. unsigned long driver_data = 0;
  96. int fields = 0;
  97. int retval = 0;
  98. fields = sscanf(buf, "%x %x %x %x %x %x %lx",
  99. &vendor, &device, &subvendor, &subdevice,
  100. &class, &class_mask, &driver_data);
  101. if (fields < 2)
  102. return -EINVAL;
  103. if (fields != 7) {
  104. struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
  105. if (!pdev)
  106. return -ENOMEM;
  107. pdev->vendor = vendor;
  108. pdev->device = device;
  109. pdev->subsystem_vendor = subvendor;
  110. pdev->subsystem_device = subdevice;
  111. pdev->class = class;
  112. if (pci_match_id(pdrv->id_table, pdev))
  113. retval = -EEXIST;
  114. kfree(pdev);
  115. if (retval)
  116. return retval;
  117. }
  118. /* Only accept driver_data values that match an existing id_table
  119. entry */
  120. if (ids) {
  121. retval = -EINVAL;
  122. while (ids->vendor || ids->subvendor || ids->class_mask) {
  123. if (driver_data == ids->driver_data) {
  124. retval = 0;
  125. break;
  126. }
  127. ids++;
  128. }
  129. if (retval) /* No match */
  130. return retval;
  131. }
  132. retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
  133. class, class_mask, driver_data);
  134. if (retval)
  135. return retval;
  136. return count;
  137. }
  138. static DRIVER_ATTR_WO(new_id);
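/*
 * Illustrative sysfs usage (driver name and IDs are hypothetical): the new_id
 * attribute declared above lets user space feed extra IDs to a loaded driver:
 *
 *	# echo "1234 5678" > /sys/bus/pci/drivers/foo/new_id
 *
 * At least the vendor and device fields must be given; subvendor, subdevice,
 * class, class_mask and driver_data are optional, as parsed by new_id_store().
 */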
  139. /**
  140. * remove_id_store - remove a PCI device ID from this driver
  141. * @driver: target device driver
  142. * @buf: buffer for scanning device ID data
  143. * @count: input size
  144. *
  145. * Removes a dynamic PCI device ID from this driver.
  146. */
  147. static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
  148. size_t count)
  149. {
  150. struct pci_dynid *dynid, *n;
  151. struct pci_driver *pdrv = to_pci_driver(driver);
  152. __u32 vendor, device, subvendor = PCI_ANY_ID,
  153. subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
  154. int fields = 0;
  155. size_t retval = -ENODEV;
  156. fields = sscanf(buf, "%x %x %x %x %x %x",
  157. &vendor, &device, &subvendor, &subdevice,
  158. &class, &class_mask);
  159. if (fields < 2)
  160. return -EINVAL;
  161. spin_lock(&pdrv->dynids.lock);
  162. list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
  163. struct pci_device_id *id = &dynid->id;
  164. if ((id->vendor == vendor) &&
  165. (id->device == device) &&
  166. (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
  167. (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
  168. !((id->class ^ class) & class_mask)) {
  169. list_del(&dynid->node);
  170. kfree(dynid);
  171. retval = count;
  172. break;
  173. }
  174. }
  175. spin_unlock(&pdrv->dynids.lock);
  176. return retval;
  177. }
  178. static DRIVER_ATTR_WO(remove_id);
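/*
 * Illustrative sysfs usage (hypothetical driver and IDs): an ID added through
 * new_id can be dropped again via the remove_id attribute declared above:
 *
 *	# echo "1234 5678" > /sys/bus/pci/drivers/foo/remove_id
 *
 * Only entries on the driver's dynids list are eligible; if nothing matches,
 * remove_id_store() returns -ENODEV.
 */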
  179. static struct attribute *pci_drv_attrs[] = {
  180. &driver_attr_new_id.attr,
  181. &driver_attr_remove_id.attr,
  182. NULL,
  183. };
  184. ATTRIBUTE_GROUPS(pci_drv);
  185. /**
  186. * pci_match_id - See if a pci device matches a given pci_id table
  187. * @ids: array of PCI device id structures to search in
  188. * @dev: the PCI device structure to match against.
  189. *
  190. * Used by a driver to check whether a PCI device present in the
  191. * system is in its list of supported devices. Returns the matching
  192. * pci_device_id structure or %NULL if there is no match.
  193. *
  194. * Deprecated, don't use this as it will not catch any dynamic ids
  195. * that a driver might want to check for.
  196. */
  197. const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
  198. struct pci_dev *dev)
  199. {
  200. if (ids) {
  201. while (ids->vendor || ids->subvendor || ids->class_mask) {
  202. if (pci_match_one_device(ids, dev))
  203. return ids;
  204. ids++;
  205. }
  206. }
  207. return NULL;
  208. }
  209. EXPORT_SYMBOL(pci_match_id);
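/*
 * Usage sketch (illustrative, hypothetical table and helpers): despite the
 * deprecation note above, pci_match_id() is still handy for checking a device
 * against a private quirk table from within a driver:
 *
 *	static const struct pci_device_id foo_quirk_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *
 *	if (pci_match_id(foo_quirk_ids, pdev))
 *		foo_apply_quirk(pdev);
 */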
  210. static const struct pci_device_id pci_device_id_any = {
  211. .vendor = PCI_ANY_ID,
  212. .device = PCI_ANY_ID,
  213. .subvendor = PCI_ANY_ID,
  214. .subdevice = PCI_ANY_ID,
  215. };
  216. /**
  217. * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
  218. * @drv: the PCI driver to match against
  219. * @dev: the PCI device structure to match against
  220. *
  221. * Used by a driver to check whether a PCI device present in the
  222. * system is in its list of supported devices. Returns the matching
  223. * pci_device_id structure or %NULL if there is no match.
  224. */
  225. static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
  226. struct pci_dev *dev)
  227. {
  228. struct pci_dynid *dynid;
  229. const struct pci_device_id *found_id = NULL;
  230. /* When driver_override is set, only bind to the matching driver */
  231. if (dev->driver_override && strcmp(dev->driver_override, drv->name))
  232. return NULL;
  233. /* Look at the dynamic ids first, before the static ones */
  234. spin_lock(&drv->dynids.lock);
  235. list_for_each_entry(dynid, &drv->dynids.list, node) {
  236. if (pci_match_one_device(&dynid->id, dev)) {
  237. found_id = &dynid->id;
  238. break;
  239. }
  240. }
  241. spin_unlock(&drv->dynids.lock);
  242. if (!found_id)
  243. found_id = pci_match_id(drv->id_table, dev);
  244. /* driver_override will always match, send a dummy id */
  245. if (!found_id && dev->driver_override)
  246. found_id = &pci_device_id_any;
  247. return found_id;
  248. }
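/*
 * Illustrative use of driver_override (device address and driver name are
 * examples): user space can force the match above to succeed for one driver
 * only, e.g. to hand a device to vfio-pci:
 *
 *	# echo vfio-pci > /sys/bus/pci/devices/0000:01:00.0/driver_override
 *	# echo 0000:01:00.0 > /sys/bus/pci/drivers_probe
 *
 * With driver_override set, only the named driver matches, and the dummy
 * pci_device_id_any entry is returned when no static or dynamic ID hits.
 */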
  249. struct drv_dev_and_id {
  250. struct pci_driver *drv;
  251. struct pci_dev *dev;
  252. const struct pci_device_id *id;
  253. };
  254. static long local_pci_probe(void *_ddi)
  255. {
  256. struct drv_dev_and_id *ddi = _ddi;
  257. struct pci_dev *pci_dev = ddi->dev;
  258. struct pci_driver *pci_drv = ddi->drv;
  259. struct device *dev = &pci_dev->dev;
  260. int rc;
  261. /*
  262. * Unbound PCI devices are always put in D0, regardless of
  263. * runtime PM status. During probe, the device is set to
  264. * active and the usage count is incremented. If the driver
  265. * supports runtime PM, it should call pm_runtime_put_noidle(),
  266. * or any other runtime PM helper function decrementing the usage
  267. * count, in its probe routine and pm_runtime_get_noresume() in
  268. * its remove routine.
  269. */
  270. pm_runtime_get_sync(dev);
  271. pci_dev->driver = pci_drv;
  272. rc = pci_drv->probe(pci_dev, ddi->id);
  273. if (!rc)
  274. return rc;
  275. if (rc < 0) {
  276. pci_dev->driver = NULL;
  277. pm_runtime_put_sync(dev);
  278. return rc;
  279. }
  280. /*
  281. * Probe function should return < 0 for failure, 0 for success.
  282. * Treat values > 0 as success, but warn.
  283. */
  284. dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
  285. return 0;
  286. }
  287. static bool pci_physfn_is_probed(struct pci_dev *dev)
  288. {
  289. #ifdef CONFIG_PCI_IOV
  290. return dev->is_virtfn && dev->physfn->is_probed;
  291. #else
  292. return false;
  293. #endif
  294. }
  295. static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
  296. const struct pci_device_id *id)
  297. {
  298. int error, node, cpu;
  299. struct drv_dev_and_id ddi = { drv, dev, id };
  300. /*
  301. * Execute driver initialization on node where the device is
  302. * attached. This way the driver likely allocates its local memory
  303. * on the right node.
  304. */
  305. node = dev_to_node(&dev->dev);
  306. dev->is_probed = 1;
  307. cpu_hotplug_disable();
  308. /*
  309. * Prevent nesting work_on_cpu() for the case where a Virtual Function
  310. * device is probed from work_on_cpu() of the Physical device.
  311. */
  312. if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
  313. pci_physfn_is_probed(dev))
  314. cpu = nr_cpu_ids;
  315. else
  316. cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
  317. if (cpu < nr_cpu_ids)
  318. error = work_on_cpu(cpu, local_pci_probe, &ddi);
  319. else
  320. error = local_pci_probe(&ddi);
  321. dev->is_probed = 0;
  322. cpu_hotplug_enable();
  323. return error;
  324. }
  325. /**
  326. * __pci_device_probe - check if a driver wants to claim a specific PCI device
  327. * @drv: driver to call to check if it wants the PCI device
  328. * @pci_dev: PCI device being probed
  329. *
  330. * returns 0 on success, else error.
  331. * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
  332. */
  333. static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
  334. {
  335. const struct pci_device_id *id;
  336. int error = 0;
  337. if (!pci_dev->driver && drv->probe) {
  338. error = -ENODEV;
  339. id = pci_match_device(drv, pci_dev);
  340. if (id)
  341. error = pci_call_probe(drv, pci_dev, id);
  342. }
  343. return error;
  344. }
  345. int __weak pcibios_alloc_irq(struct pci_dev *dev)
  346. {
  347. return 0;
  348. }
  349. void __weak pcibios_free_irq(struct pci_dev *dev)
  350. {
  351. }
  352. #ifdef CONFIG_PCI_IOV
  353. static inline bool pci_device_can_probe(struct pci_dev *pdev)
  354. {
  355. return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
  356. pdev->driver_override);
  357. }
  358. #else
  359. static inline bool pci_device_can_probe(struct pci_dev *pdev)
  360. {
  361. return true;
  362. }
  363. #endif
  364. static int pci_device_probe(struct device *dev)
  365. {
  366. int error;
  367. struct pci_dev *pci_dev = to_pci_dev(dev);
  368. struct pci_driver *drv = to_pci_driver(dev->driver);
  369. if (!pci_device_can_probe(pci_dev))
  370. return -ENODEV;
  371. pci_assign_irq(pci_dev);
  372. error = pcibios_alloc_irq(pci_dev);
  373. if (error < 0)
  374. return error;
  375. pci_dev_get(pci_dev);
  376. error = __pci_device_probe(drv, pci_dev);
  377. if (error) {
  378. pcibios_free_irq(pci_dev);
  379. pci_dev_put(pci_dev);
  380. }
  381. return error;
  382. }
  383. static int pci_device_remove(struct device *dev)
  384. {
  385. struct pci_dev *pci_dev = to_pci_dev(dev);
  386. struct pci_driver *drv = pci_dev->driver;
  387. if (drv) {
  388. if (drv->remove) {
  389. pm_runtime_get_sync(dev);
  390. drv->remove(pci_dev);
  391. pm_runtime_put_noidle(dev);
  392. }
  393. pcibios_free_irq(pci_dev);
  394. pci_dev->driver = NULL;
  395. pci_iov_remove(pci_dev);
  396. }
  397. /* Undo the runtime PM settings in local_pci_probe() */
  398. pm_runtime_put_sync(dev);
  399. /*
  400. * If the device is still on, set the power state as "unknown",
  401. * since it might change by the next time we load the driver.
  402. */
  403. if (pci_dev->current_state == PCI_D0)
  404. pci_dev->current_state = PCI_UNKNOWN;
  405. /*
  406. * We would love to complain here if pci_dev->is_enabled is set, that
  407. * the driver should have called pci_disable_device(), but the
  408. * unfortunate fact is there are too many odd BIOS and bridge setups
  409. * that don't like drivers doing that all of the time.
  410. * Oh well, we can dream of sane hardware when we sleep, no matter how
  411. * horrible the crap we have to deal with is when we are awake...
  412. */
  413. pci_dev_put(pci_dev);
  414. return 0;
  415. }
  416. static void pci_device_shutdown(struct device *dev)
  417. {
  418. struct pci_dev *pci_dev = to_pci_dev(dev);
  419. struct pci_driver *drv = pci_dev->driver;
  420. pm_runtime_resume(dev);
  421. if (drv && drv->shutdown)
  422. drv->shutdown(pci_dev);
  423. /*
  424. * If this is a kexec reboot, turn off Bus Master bit on the
  425. * device to tell it to not continue to do DMA. Don't touch
  426. * devices in D3cold or unknown states.
  427. * If it is not a kexec reboot, firmware will hit the PCI
  428. * devices with a big hammer and stop their DMA anyway.
  429. */
  430. if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
  431. pci_clear_master(pci_dev);
  432. }
  433. #ifdef CONFIG_PM
  434. /* Auxiliary functions used for system resume and run-time resume. */
  435. /**
  436. * pci_restore_standard_config - restore standard config registers of PCI device
  437. * @pci_dev: PCI device to handle
  438. */
  439. static int pci_restore_standard_config(struct pci_dev *pci_dev)
  440. {
  441. pci_update_current_state(pci_dev, PCI_UNKNOWN);
  442. if (pci_dev->current_state != PCI_D0) {
  443. int error = pci_set_power_state(pci_dev, PCI_D0);
  444. if (error)
  445. return error;
  446. }
  447. pci_restore_state(pci_dev);
  448. pci_pme_restore(pci_dev);
  449. return 0;
  450. }
  451. #endif
  452. #ifdef CONFIG_PM_SLEEP
  453. static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
  454. {
  455. pci_power_up(pci_dev);
  456. pci_restore_state(pci_dev);
  457. pci_pme_restore(pci_dev);
  458. pci_fixup_device(pci_fixup_resume_early, pci_dev);
  459. }
  460. /*
  461. * Default "suspend" method for devices that have no driver provided suspend,
  462. * or not even a driver at all (second part).
  463. */
  464. static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
  465. {
  466. /*
  467. * mark its power state as "unknown", since we don't know if
  468. * e.g. the BIOS will change its device state when we suspend.
  469. */
  470. if (pci_dev->current_state == PCI_D0)
  471. pci_dev->current_state = PCI_UNKNOWN;
  472. }
  473. /*
  474. * Default "resume" method for devices that have no driver provided resume,
  475. * or not even a driver at all (second part).
  476. */
  477. static int pci_pm_reenable_device(struct pci_dev *pci_dev)
  478. {
  479. int retval;
  480. /* if the device was enabled before suspend, reenable */
  481. retval = pci_reenable_device(pci_dev);
  482. /*
  483. * if the device was busmaster before the suspend, make it busmaster
  484. * again
  485. */
  486. if (pci_dev->is_busmaster)
  487. pci_set_master(pci_dev);
  488. return retval;
  489. }
  490. static int pci_legacy_suspend(struct device *dev, pm_message_t state)
  491. {
  492. struct pci_dev *pci_dev = to_pci_dev(dev);
  493. struct pci_driver *drv = pci_dev->driver;
  494. if (drv && drv->suspend) {
  495. pci_power_t prev = pci_dev->current_state;
  496. int error;
  497. error = drv->suspend(pci_dev, state);
  498. suspend_report_result(drv->suspend, error);
  499. if (error)
  500. return error;
  501. if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
  502. && pci_dev->current_state != PCI_UNKNOWN) {
  503. WARN_ONCE(pci_dev->current_state != prev,
  504. "PCI PM: Device state not saved by %pF\n",
  505. drv->suspend);
  506. }
  507. }
  508. pci_fixup_device(pci_fixup_suspend, pci_dev);
  509. return 0;
  510. }
  511. static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
  512. {
  513. struct pci_dev *pci_dev = to_pci_dev(dev);
  514. struct pci_driver *drv = pci_dev->driver;
  515. if (drv && drv->suspend_late) {
  516. pci_power_t prev = pci_dev->current_state;
  517. int error;
  518. error = drv->suspend_late(pci_dev, state);
  519. suspend_report_result(drv->suspend_late, error);
  520. if (error)
  521. return error;
  522. if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
  523. && pci_dev->current_state != PCI_UNKNOWN) {
  524. WARN_ONCE(pci_dev->current_state != prev,
  525. "PCI PM: Device state not saved by %pF\n",
  526. drv->suspend_late);
  527. goto Fixup;
  528. }
  529. }
  530. if (!pci_dev->state_saved)
  531. pci_save_state(pci_dev);
  532. pci_pm_set_unknown_state(pci_dev);
  533. Fixup:
  534. pci_fixup_device(pci_fixup_suspend_late, pci_dev);
  535. return 0;
  536. }
  537. static int pci_legacy_resume_early(struct device *dev)
  538. {
  539. struct pci_dev *pci_dev = to_pci_dev(dev);
  540. struct pci_driver *drv = pci_dev->driver;
  541. return drv && drv->resume_early ?
  542. drv->resume_early(pci_dev) : 0;
  543. }
  544. static int pci_legacy_resume(struct device *dev)
  545. {
  546. struct pci_dev *pci_dev = to_pci_dev(dev);
  547. struct pci_driver *drv = pci_dev->driver;
  548. pci_fixup_device(pci_fixup_resume, pci_dev);
  549. return drv && drv->resume ?
  550. drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
  551. }
  552. /* Auxiliary functions used by the new power management framework */
  553. static void pci_pm_default_resume(struct pci_dev *pci_dev)
  554. {
  555. pci_fixup_device(pci_fixup_resume, pci_dev);
  556. pci_enable_wake(pci_dev, PCI_D0, false);
  557. }
  558. static void pci_pm_default_suspend(struct pci_dev *pci_dev)
  559. {
  560. /* Disable non-bridge devices without PM support */
  561. if (!pci_has_subordinate(pci_dev))
  562. pci_disable_enabled_device(pci_dev);
  563. }
  564. static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
  565. {
  566. struct pci_driver *drv = pci_dev->driver;
  567. bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
  568. || drv->resume_early);
  569. /*
  570. * Legacy PM support is used by default, so warn if the new framework is
  571. * supported as well. Drivers are supposed to support either the
  572. * former, or the latter, but not both at the same time.
  573. */
  574. WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
  575. drv->name, pci_dev->vendor, pci_dev->device);
  576. return ret;
  577. }
  578. /* New power management framework */
  579. static int pci_pm_prepare(struct device *dev)
  580. {
  581. struct device_driver *drv = dev->driver;
  582. if (drv && drv->pm && drv->pm->prepare) {
  583. int error = drv->pm->prepare(dev);
  584. if (error < 0)
  585. return error;
  586. if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
  587. return 0;
  588. }
  589. return pci_dev_keep_suspended(to_pci_dev(dev));
  590. }
  591. static void pci_pm_complete(struct device *dev)
  592. {
  593. struct pci_dev *pci_dev = to_pci_dev(dev);
  594. pci_dev_complete_resume(pci_dev);
  595. pm_generic_complete(dev);
  596. /* Resume device if platform firmware has put it in reset-power-on */
  597. if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
  598. pci_power_t pre_sleep_state = pci_dev->current_state;
  599. pci_update_current_state(pci_dev, pci_dev->current_state);
  600. if (pci_dev->current_state < pre_sleep_state)
  601. pm_request_resume(dev);
  602. }
  603. }
  604. #else /* !CONFIG_PM_SLEEP */
  605. #define pci_pm_prepare NULL
  606. #define pci_pm_complete NULL
  607. #endif /* !CONFIG_PM_SLEEP */
  608. #ifdef CONFIG_SUSPEND
  609. static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
  610. {
  611. /*
  612. * Some BIOSes forget to clear Root PME Status bits after system
  613. * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
  614. * Clear those bits now just in case (shouldn't hurt).
  615. */
  616. if (pci_is_pcie(pci_dev) &&
  617. (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
  618. pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
  619. pcie_clear_root_pme_status(pci_dev);
  620. }
  621. static int pci_pm_suspend(struct device *dev)
  622. {
  623. struct pci_dev *pci_dev = to_pci_dev(dev);
  624. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  625. if (pci_has_legacy_pm_support(pci_dev))
  626. return pci_legacy_suspend(dev, PMSG_SUSPEND);
  627. if (!pm) {
  628. pci_pm_default_suspend(pci_dev);
  629. return 0;
  630. }
  631. /*
  632. * PCI devices suspended at run time may need to be resumed at this
  633. * point, because in general it may be necessary to reconfigure them for
  634. * system suspend. Namely, if the device is expected to wake up the
  635. * system from the sleep state, it may have to be reconfigured for this
  636. * purpose, or if the device is not expected to wake up the system from
  637. * the sleep state, it should be prevented from signaling wakeup events
  638. * going forward.
  639. *
  640. * Also if the driver of the device does not indicate that its system
  641. * suspend callbacks can cope with runtime-suspended devices, it is
  642. * better to resume the device from runtime suspend here.
  643. */
  644. if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
  645. !pci_dev_keep_suspended(pci_dev)) {
  646. pm_runtime_resume(dev);
  647. pci_dev->state_saved = false;
  648. }
  649. if (pm->suspend) {
  650. pci_power_t prev = pci_dev->current_state;
  651. int error;
  652. error = pm->suspend(dev);
  653. suspend_report_result(pm->suspend, error);
  654. if (error)
  655. return error;
  656. if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
  657. && pci_dev->current_state != PCI_UNKNOWN) {
  658. WARN_ONCE(pci_dev->current_state != prev,
  659. "PCI PM: State of device not saved by %pF\n",
  660. pm->suspend);
  661. }
  662. }
  663. return 0;
  664. }
  665. static int pci_pm_suspend_late(struct device *dev)
  666. {
  667. if (dev_pm_smart_suspend_and_suspended(dev))
  668. return 0;
  669. pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
  670. return pm_generic_suspend_late(dev);
  671. }
  672. static int pci_pm_suspend_noirq(struct device *dev)
  673. {
  674. struct pci_dev *pci_dev = to_pci_dev(dev);
  675. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  676. if (dev_pm_smart_suspend_and_suspended(dev)) {
  677. dev->power.may_skip_resume = true;
  678. return 0;
  679. }
  680. if (pci_has_legacy_pm_support(pci_dev))
  681. return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
  682. if (!pm) {
  683. pci_save_state(pci_dev);
  684. goto Fixup;
  685. }
  686. if (pm->suspend_noirq) {
  687. pci_power_t prev = pci_dev->current_state;
  688. int error;
  689. error = pm->suspend_noirq(dev);
  690. suspend_report_result(pm->suspend_noirq, error);
  691. if (error)
  692. return error;
  693. if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
  694. && pci_dev->current_state != PCI_UNKNOWN) {
  695. WARN_ONCE(pci_dev->current_state != prev,
  696. "PCI PM: State of device not saved by %pF\n",
  697. pm->suspend_noirq);
  698. goto Fixup;
  699. }
  700. }
  701. if (!pci_dev->state_saved) {
  702. pci_save_state(pci_dev);
  703. if (pci_power_manageable(pci_dev))
  704. pci_prepare_to_sleep(pci_dev);
  705. }
  706. dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
  707. pci_power_name(pci_dev->current_state));
  708. pci_pm_set_unknown_state(pci_dev);
  709. /*
  710. * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
  711. * PCI COMMAND register isn't 0, the BIOS assumes that the controller
  712. * hasn't been quiesced and tries to turn it off. If the controller
  713. * is already in D3, this can hang or cause memory corruption.
  714. *
  715. * Since the value of the COMMAND register doesn't matter once the
  716. * device has been suspended, we can safely set it to 0 here.
  717. */
  718. if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
  719. pci_write_config_word(pci_dev, PCI_COMMAND, 0);
  720. Fixup:
  721. pci_fixup_device(pci_fixup_suspend_late, pci_dev);
  722. /*
  723. * If the target system sleep state is suspend-to-idle, it is sufficient
  724. * to check whether or not the device's wakeup settings are good for
  725. * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
  726. * pci_pm_complete() to take care of fixing up the device's state
  727. * anyway, if need be.
  728. */
  729. dev->power.may_skip_resume = device_may_wakeup(dev) ||
  730. !device_can_wakeup(dev);
  731. return 0;
  732. }
  733. static int pci_pm_resume_noirq(struct device *dev)
  734. {
  735. struct pci_dev *pci_dev = to_pci_dev(dev);
  736. struct device_driver *drv = dev->driver;
  737. int error = 0;
  738. if (dev_pm_may_skip_resume(dev))
  739. return 0;
  740. /*
  741. * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
  742. * during system suspend, so update their runtime PM status to "active"
  743. * as they are going to be put into D0 shortly.
  744. */
  745. if (dev_pm_smart_suspend_and_suspended(dev))
  746. pm_runtime_set_active(dev);
  747. pci_pm_default_resume_early(pci_dev);
  748. if (pci_has_legacy_pm_support(pci_dev))
  749. return pci_legacy_resume_early(dev);
  750. pcie_pme_root_status_cleanup(pci_dev);
  751. if (drv && drv->pm && drv->pm->resume_noirq)
  752. error = drv->pm->resume_noirq(dev);
  753. return error;
  754. }
  755. static int pci_pm_resume(struct device *dev)
  756. {
  757. struct pci_dev *pci_dev = to_pci_dev(dev);
  758. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  759. int error = 0;
  760. /*
  761. * This is necessary for the suspend error path in which resume is
  762. * called without restoring the standard config registers of the device.
  763. */
  764. if (pci_dev->state_saved)
  765. pci_restore_standard_config(pci_dev);
  766. if (pci_has_legacy_pm_support(pci_dev))
  767. return pci_legacy_resume(dev);
  768. pci_pm_default_resume(pci_dev);
  769. if (pm) {
  770. if (pm->resume)
  771. error = pm->resume(dev);
  772. } else {
  773. pci_pm_reenable_device(pci_dev);
  774. }
  775. return error;
  776. }
  777. #else /* !CONFIG_SUSPEND */
  778. #define pci_pm_suspend NULL
  779. #define pci_pm_suspend_late NULL
  780. #define pci_pm_suspend_noirq NULL
  781. #define pci_pm_resume NULL
  782. #define pci_pm_resume_noirq NULL
  783. #endif /* !CONFIG_SUSPEND */
  784. #ifdef CONFIG_HIBERNATE_CALLBACKS
  785. /*
  786. * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
  787. * a hibernate transition
  788. */
  789. struct dev_pm_ops __weak pcibios_pm_ops;
  790. static int pci_pm_freeze(struct device *dev)
  791. {
  792. struct pci_dev *pci_dev = to_pci_dev(dev);
  793. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  794. if (pci_has_legacy_pm_support(pci_dev))
  795. return pci_legacy_suspend(dev, PMSG_FREEZE);
  796. if (!pm) {
  797. pci_pm_default_suspend(pci_dev);
  798. return 0;
  799. }
  800. /*
  801. * Resume all runtime-suspended devices before creating a snapshot
  802. * image of system memory, because the restore kernel generally cannot
  803. * be expected to always handle them consistently and they need to be
  804. * put into the runtime-active metastate during system resume anyway,
  805. * so it is better to ensure that the state saved in the image will be
  806. * always consistent with that.
  807. */
  808. pm_runtime_resume(dev);
  809. pci_dev->state_saved = false;
  810. if (pm->freeze) {
  811. int error;
  812. error = pm->freeze(dev);
  813. suspend_report_result(pm->freeze, error);
  814. if (error)
  815. return error;
  816. }
  817. return 0;
  818. }
  819. static int pci_pm_freeze_late(struct device *dev)
  820. {
  821. if (dev_pm_smart_suspend_and_suspended(dev))
  822. return 0;
  823. return pm_generic_freeze_late(dev);
  824. }
  825. static int pci_pm_freeze_noirq(struct device *dev)
  826. {
  827. struct pci_dev *pci_dev = to_pci_dev(dev);
  828. struct device_driver *drv = dev->driver;
  829. if (dev_pm_smart_suspend_and_suspended(dev))
  830. return 0;
  831. if (pci_has_legacy_pm_support(pci_dev))
  832. return pci_legacy_suspend_late(dev, PMSG_FREEZE);
  833. if (drv && drv->pm && drv->pm->freeze_noirq) {
  834. int error;
  835. error = drv->pm->freeze_noirq(dev);
  836. suspend_report_result(drv->pm->freeze_noirq, error);
  837. if (error)
  838. return error;
  839. }
  840. if (!pci_dev->state_saved)
  841. pci_save_state(pci_dev);
  842. pci_pm_set_unknown_state(pci_dev);
  843. if (pcibios_pm_ops.freeze_noirq)
  844. return pcibios_pm_ops.freeze_noirq(dev);
  845. return 0;
  846. }
  847. static int pci_pm_thaw_noirq(struct device *dev)
  848. {
  849. struct pci_dev *pci_dev = to_pci_dev(dev);
  850. struct device_driver *drv = dev->driver;
  851. int error = 0;
  852. /*
  853. * If the device is in runtime suspend, the code below may not work
  854. * correctly with it, so skip that code and make the PM core skip all of
  855. * the subsequent "thaw" callbacks for the device.
  856. */
  857. if (dev_pm_smart_suspend_and_suspended(dev)) {
  858. dev_pm_skip_next_resume_phases(dev);
  859. return 0;
  860. }
  861. if (pcibios_pm_ops.thaw_noirq) {
  862. error = pcibios_pm_ops.thaw_noirq(dev);
  863. if (error)
  864. return error;
  865. }
  866. /*
  867. * Both the legacy ->resume_early() and the new pm->thaw_noirq()
  868. * callbacks assume the device has been returned to D0 and its
  869. * config state has been restored.
  870. *
  871. * In addition, pci_restore_state() restores MSI-X state in MMIO
  872. * space, which requires the device to be in D0, so return it to D0
  873. * in case the driver's "freeze" callbacks put it into a low-power
  874. * state.
  875. */
  876. pci_set_power_state(pci_dev, PCI_D0);
  877. pci_restore_state(pci_dev);
  878. if (pci_has_legacy_pm_support(pci_dev))
  879. return pci_legacy_resume_early(dev);
  880. if (drv && drv->pm && drv->pm->thaw_noirq)
  881. error = drv->pm->thaw_noirq(dev);
  882. return error;
  883. }
  884. static int pci_pm_thaw(struct device *dev)
  885. {
  886. struct pci_dev *pci_dev = to_pci_dev(dev);
  887. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  888. int error = 0;
  889. if (pci_has_legacy_pm_support(pci_dev))
  890. return pci_legacy_resume(dev);
  891. if (pm) {
  892. if (pm->thaw)
  893. error = pm->thaw(dev);
  894. } else {
  895. pci_pm_reenable_device(pci_dev);
  896. }
  897. pci_dev->state_saved = false;
  898. return error;
  899. }
  900. static int pci_pm_poweroff(struct device *dev)
  901. {
  902. struct pci_dev *pci_dev = to_pci_dev(dev);
  903. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  904. if (pci_has_legacy_pm_support(pci_dev))
  905. return pci_legacy_suspend(dev, PMSG_HIBERNATE);
  906. if (!pm) {
  907. pci_pm_default_suspend(pci_dev);
  908. return 0;
  909. }
  910. /* The reason to do that is the same as in pci_pm_suspend(). */
  911. if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
  912. !pci_dev_keep_suspended(pci_dev))
  913. pm_runtime_resume(dev);
  914. pci_dev->state_saved = false;
  915. if (pm->poweroff) {
  916. int error;
  917. error = pm->poweroff(dev);
  918. suspend_report_result(pm->poweroff, error);
  919. if (error)
  920. return error;
  921. }
  922. return 0;
  923. }
  924. static int pci_pm_poweroff_late(struct device *dev)
  925. {
  926. if (dev_pm_smart_suspend_and_suspended(dev))
  927. return 0;
  928. pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
  929. return pm_generic_poweroff_late(dev);
  930. }
  931. static int pci_pm_poweroff_noirq(struct device *dev)
  932. {
  933. struct pci_dev *pci_dev = to_pci_dev(dev);
  934. struct device_driver *drv = dev->driver;
  935. if (dev_pm_smart_suspend_and_suspended(dev))
  936. return 0;
  937. if (pci_has_legacy_pm_support(to_pci_dev(dev)))
  938. return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
  939. if (!drv || !drv->pm) {
  940. pci_fixup_device(pci_fixup_suspend_late, pci_dev);
  941. return 0;
  942. }
  943. if (drv->pm->poweroff_noirq) {
  944. int error;
  945. error = drv->pm->poweroff_noirq(dev);
  946. suspend_report_result(drv->pm->poweroff_noirq, error);
  947. if (error)
  948. return error;
  949. }
  950. if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
  951. pci_prepare_to_sleep(pci_dev);
  952. /*
  953. * The reason for doing this here is the same as for the analogous code
  954. * in pci_pm_suspend_noirq().
  955. */
  956. if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
  957. pci_write_config_word(pci_dev, PCI_COMMAND, 0);
  958. pci_fixup_device(pci_fixup_suspend_late, pci_dev);
  959. if (pcibios_pm_ops.poweroff_noirq)
  960. return pcibios_pm_ops.poweroff_noirq(dev);
  961. return 0;
  962. }
  963. static int pci_pm_restore_noirq(struct device *dev)
  964. {
  965. struct pci_dev *pci_dev = to_pci_dev(dev);
  966. struct device_driver *drv = dev->driver;
  967. int error = 0;
  968. /* This is analogous to the pci_pm_resume_noirq() case. */
  969. if (dev_pm_smart_suspend_and_suspended(dev))
  970. pm_runtime_set_active(dev);
  971. if (pcibios_pm_ops.restore_noirq) {
  972. error = pcibios_pm_ops.restore_noirq(dev);
  973. if (error)
  974. return error;
  975. }
  976. pci_pm_default_resume_early(pci_dev);
  977. if (pci_has_legacy_pm_support(pci_dev))
  978. return pci_legacy_resume_early(dev);
  979. if (drv && drv->pm && drv->pm->restore_noirq)
  980. error = drv->pm->restore_noirq(dev);
  981. return error;
  982. }
  983. static int pci_pm_restore(struct device *dev)
  984. {
  985. struct pci_dev *pci_dev = to_pci_dev(dev);
  986. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  987. int error = 0;
  988. /*
  989. * This is necessary for the hibernation error path in which restore is
  990. * called without restoring the standard config registers of the device.
  991. */
  992. if (pci_dev->state_saved)
  993. pci_restore_standard_config(pci_dev);
  994. if (pci_has_legacy_pm_support(pci_dev))
  995. return pci_legacy_resume(dev);
  996. pci_pm_default_resume(pci_dev);
  997. if (pm) {
  998. if (pm->restore)
  999. error = pm->restore(dev);
  1000. } else {
  1001. pci_pm_reenable_device(pci_dev);
  1002. }
  1003. return error;
  1004. }
  1005. #else /* !CONFIG_HIBERNATE_CALLBACKS */
  1006. #define pci_pm_freeze NULL
  1007. #define pci_pm_freeze_late NULL
  1008. #define pci_pm_freeze_noirq NULL
  1009. #define pci_pm_thaw NULL
  1010. #define pci_pm_thaw_noirq NULL
  1011. #define pci_pm_poweroff NULL
  1012. #define pci_pm_poweroff_late NULL
  1013. #define pci_pm_poweroff_noirq NULL
  1014. #define pci_pm_restore NULL
  1015. #define pci_pm_restore_noirq NULL
  1016. #endif /* !CONFIG_HIBERNATE_CALLBACKS */
  1017. #ifdef CONFIG_PM
  1018. static int pci_pm_runtime_suspend(struct device *dev)
  1019. {
  1020. struct pci_dev *pci_dev = to_pci_dev(dev);
  1021. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  1022. pci_power_t prev = pci_dev->current_state;
  1023. int error;
  1024. /*
  1025. * If pci_dev->driver is not set (unbound), we leave the device in D0,
  1026. * but it may go to D3cold when the bridge above it runtime suspends.
  1027. * Save its config space in case that happens.
  1028. */
  1029. if (!pci_dev->driver) {
  1030. pci_save_state(pci_dev);
  1031. return 0;
  1032. }
  1033. pci_dev->state_saved = false;
  1034. if (pm && pm->runtime_suspend) {
  1035. error = pm->runtime_suspend(dev);
  1036. /*
  1037. * -EBUSY and -EAGAIN are used to request the runtime PM core
  1038. * to schedule a new suspend, so log the event only with debug
  1039. * log level.
  1040. */
  1041. if (error == -EBUSY || error == -EAGAIN) {
  1042. dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
  1043. pm->runtime_suspend, error);
  1044. return error;
  1045. } else if (error) {
  1046. dev_err(dev, "can't suspend (%pf returned %d)\n",
  1047. pm->runtime_suspend, error);
  1048. return error;
  1049. }
  1050. }
  1051. pci_fixup_device(pci_fixup_suspend, pci_dev);
  1052. if (pm && pm->runtime_suspend
  1053. && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
  1054. && pci_dev->current_state != PCI_UNKNOWN) {
  1055. WARN_ONCE(pci_dev->current_state != prev,
  1056. "PCI PM: State of device not saved by %pF\n",
  1057. pm->runtime_suspend);
  1058. return 0;
  1059. }
  1060. if (!pci_dev->state_saved) {
  1061. pci_save_state(pci_dev);
  1062. pci_finish_runtime_suspend(pci_dev);
  1063. }
  1064. return 0;
  1065. }
  1066. static int pci_pm_runtime_resume(struct device *dev)
  1067. {
  1068. int rc = 0;
  1069. struct pci_dev *pci_dev = to_pci_dev(dev);
  1070. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  1071. /*
  1072. * Restoring config space is necessary even if the device is not bound
  1073. * to a driver because although we left it in D0, it may have gone to
  1074. * D3cold when the bridge above it runtime suspended.
  1075. */
  1076. pci_restore_standard_config(pci_dev);
  1077. if (!pci_dev->driver)
  1078. return 0;
  1079. pci_fixup_device(pci_fixup_resume_early, pci_dev);
  1080. pci_enable_wake(pci_dev, PCI_D0, false);
  1081. pci_fixup_device(pci_fixup_resume, pci_dev);
  1082. if (pm && pm->runtime_resume)
  1083. rc = pm->runtime_resume(dev);
  1084. pci_dev->runtime_d3cold = false;
  1085. return rc;
  1086. }
  1087. static int pci_pm_runtime_idle(struct device *dev)
  1088. {
  1089. struct pci_dev *pci_dev = to_pci_dev(dev);
  1090. const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
  1091. int ret = 0;
  1092. /*
  1093. * If pci_dev->driver is not set (unbound), the device should
  1094. * always remain in D0 regardless of the runtime PM status
  1095. */
  1096. if (!pci_dev->driver)
  1097. return 0;
  1098. if (!pm)
  1099. return -ENOSYS;
  1100. if (pm->runtime_idle)
  1101. ret = pm->runtime_idle(dev);
  1102. return ret;
  1103. }
  1104. static const struct dev_pm_ops pci_dev_pm_ops = {
  1105. .prepare = pci_pm_prepare,
  1106. .complete = pci_pm_complete,
  1107. .suspend = pci_pm_suspend,
  1108. .suspend_late = pci_pm_suspend_late,
  1109. .resume = pci_pm_resume,
  1110. .freeze = pci_pm_freeze,
  1111. .freeze_late = pci_pm_freeze_late,
  1112. .thaw = pci_pm_thaw,
  1113. .poweroff = pci_pm_poweroff,
  1114. .poweroff_late = pci_pm_poweroff_late,
  1115. .restore = pci_pm_restore,
  1116. .suspend_noirq = pci_pm_suspend_noirq,
  1117. .resume_noirq = pci_pm_resume_noirq,
  1118. .freeze_noirq = pci_pm_freeze_noirq,
  1119. .thaw_noirq = pci_pm_thaw_noirq,
  1120. .poweroff_noirq = pci_pm_poweroff_noirq,
  1121. .restore_noirq = pci_pm_restore_noirq,
  1122. .runtime_suspend = pci_pm_runtime_suspend,
  1123. .runtime_resume = pci_pm_runtime_resume,
  1124. .runtime_idle = pci_pm_runtime_idle,
  1125. };
  1126. #define PCI_PM_OPS_PTR (&pci_dev_pm_ops)
  1127. #else /* !CONFIG_PM */
  1128. #define pci_pm_runtime_suspend NULL
  1129. #define pci_pm_runtime_resume NULL
  1130. #define pci_pm_runtime_idle NULL
  1131. #define PCI_PM_OPS_PTR NULL
  1132. #endif /* !CONFIG_PM */
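/*
 * Illustrative driver-side counterpart (all names hypothetical): the bus-level
 * callbacks above dispatch to a driver's own dev_pm_ops, which a PCI driver
 * typically supplies through its embedded struct device_driver:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.driver.pm	= &foo_pm_ops,
 *	};
 */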
  1133. /**
  1134. * __pci_register_driver - register a new pci driver
  1135. * @drv: the driver structure to register
  1136. * @owner: owner module of drv
  1137. * @mod_name: module name string
  1138. *
  1139. * Adds the driver structure to the list of registered drivers.
  1140. * Returns a negative value on error, otherwise 0.
  1141. * If no error occurred, the driver remains registered even if
  1142. * no device was claimed during registration.
  1143. */
  1144. int __pci_register_driver(struct pci_driver *drv, struct module *owner,
  1145. const char *mod_name)
  1146. {
  1147. /* initialize common driver fields */
  1148. drv->driver.name = drv->name;
  1149. drv->driver.bus = &pci_bus_type;
  1150. drv->driver.owner = owner;
  1151. drv->driver.mod_name = mod_name;
  1152. drv->driver.groups = drv->groups;
  1153. spin_lock_init(&drv->dynids.lock);
  1154. INIT_LIST_HEAD(&drv->dynids.list);
  1155. /* register with core */
  1156. return driver_register(&drv->driver);
  1157. }
  1158. EXPORT_SYMBOL(__pci_register_driver);
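/*
 * Usage sketch (hypothetical driver): drivers normally reach this through the
 * pci_register_driver() macro, which fills in THIS_MODULE and KBUILD_MODNAME,
 * most often indirectly via module_pci_driver():
 *
 *	module_pci_driver(foo_pci_driver);
 *
 * which generates module init/exit functions calling pci_register_driver()
 * and pci_unregister_driver().
 */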
  1159. /**
  1160. * pci_unregister_driver - unregister a pci driver
  1161. * @drv: the driver structure to unregister
  1162. *
  1163. * Deletes the driver structure from the list of registered PCI drivers,
  1164. * gives it a chance to clean up by calling its remove() function for
  1165. * each device it was responsible for, and marks those devices as
  1166. * driverless.
  1167. */
  1168. void pci_unregister_driver(struct pci_driver *drv)
  1169. {
  1170. driver_unregister(&drv->driver);
  1171. pci_free_dynids(drv);
  1172. }
  1173. EXPORT_SYMBOL(pci_unregister_driver);
  1174. static struct pci_driver pci_compat_driver = {
  1175. .name = "compat"
  1176. };
  1177. /**
  1178. * pci_dev_driver - get the pci_driver of a device
  1179. * @dev: the device to query
  1180. *
  1181. * Returns the appropriate pci_driver structure or %NULL if there is no
  1182. * registered driver for the device.
  1183. */
  1184. struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
  1185. {
  1186. if (dev->driver)
  1187. return dev->driver;
  1188. else {
  1189. int i;
  1190. for (i = 0; i <= PCI_ROM_RESOURCE; i++)
  1191. if (dev->resource[i].flags & IORESOURCE_BUSY)
  1192. return &pci_compat_driver;
  1193. }
  1194. return NULL;
  1195. }
  1196. EXPORT_SYMBOL(pci_dev_driver);
  1197. /**
  1198. * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
  1199. * @dev: the PCI device structure to match against
  1200. * @drv: the device driver to search for matching PCI device id structures
  1201. *
  1202. * Used by the PCI core to check whether the device @dev matches one of
  1203. * the IDs (static or dynamic) supported by @drv. Returns 1 if there is
  1204. * a match, 0 otherwise.
  1205. */
  1206. static int pci_bus_match(struct device *dev, struct device_driver *drv)
  1207. {
  1208. struct pci_dev *pci_dev = to_pci_dev(dev);
  1209. struct pci_driver *pci_drv;
  1210. const struct pci_device_id *found_id;
  1211. if (!pci_dev->match_driver)
  1212. return 0;
  1213. pci_drv = to_pci_driver(drv);
  1214. found_id = pci_match_device(pci_drv, pci_dev);
  1215. if (found_id)
  1216. return 1;
  1217. return 0;
  1218. }
  1219. /**
  1220. * pci_dev_get - increments the reference count of the pci device structure
  1221. * @dev: the device being referenced
  1222. *
  1223. * Each live reference to a device should be refcounted.
  1224. *
  1225. * Drivers for PCI devices should normally record such references in
  1226. * their probe() methods, when they bind to a device, and release
  1227. * them by calling pci_dev_put(), in their disconnect() methods.
  1228. *
  1229. * A pointer to the device with the incremented reference counter is returned.
  1230. */
  1231. struct pci_dev *pci_dev_get(struct pci_dev *dev)
  1232. {
  1233. if (dev)
  1234. get_device(&dev->dev);
  1235. return dev;
  1236. }
  1237. EXPORT_SYMBOL(pci_dev_get);
  1238. /**
  1239. * pci_dev_put - release a use of the pci device structure
  1240. * @dev: device that's been disconnected
  1241. *
  1242. * Must be called when a user of a device is finished with it. When the last
  1243. * user of the device calls this function, the memory of the device is freed.
  1244. */
  1245. void pci_dev_put(struct pci_dev *dev)
  1246. {
  1247. if (dev)
  1248. put_device(&dev->dev);
  1249. }
  1250. EXPORT_SYMBOL(pci_dev_put);
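/*
 * Usage sketch (illustrative): lookups that return a pci_dev hold a reference
 * taken with pci_dev_get() and must be balanced by the caller, e.g.
 *
 *	struct pci_dev *pdev = pci_get_device(vendor, device, NULL);
 *
 *	if (pdev) {
 *		... use the device ...
 *		pci_dev_put(pdev);
 *	}
 */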
  1251. static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
  1252. {
  1253. struct pci_dev *pdev;
  1254. if (!dev)
  1255. return -ENODEV;
  1256. pdev = to_pci_dev(dev);
  1257. if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
  1258. return -ENOMEM;
  1259. if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
  1260. return -ENOMEM;
  1261. if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
  1262. pdev->subsystem_device))
  1263. return -ENOMEM;
  1264. if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
  1265. return -ENOMEM;
  1266. if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
  1267. pdev->vendor, pdev->device,
  1268. pdev->subsystem_vendor, pdev->subsystem_device,
  1269. (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
  1270. (u8)(pdev->class)))
  1271. return -ENOMEM;
  1272. return 0;
  1273. }
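/*
 * Illustrative result (hypothetical IDs): for vendor 0x1234, device 0x5678,
 * subsystem 0x0000:0x0000 and class 0x020000, the MODALIAS variable built
 * above would read:
 *
 *	MODALIAS=pci:v00001234d00005678sv00000000sd00000000bc02sc00i00
 *
 * which is what udev and modprobe match against module alias tables.
 */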
  1274. #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
  1275. /**
  1276. * pci_uevent_ers - emit a uevent during recovery path of PCI device
  1277. * @pdev: PCI device undergoing error recovery
  1278. * @err_type: type of error event
  1279. */
  1280. void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
  1281. {
  1282. int idx = 0;
  1283. char *envp[3];
  1284. switch (err_type) {
  1285. case PCI_ERS_RESULT_NONE:
  1286. case PCI_ERS_RESULT_CAN_RECOVER:
  1287. envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
  1288. envp[idx++] = "DEVICE_ONLINE=0";
  1289. break;
  1290. case PCI_ERS_RESULT_RECOVERED:
  1291. envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
  1292. envp[idx++] = "DEVICE_ONLINE=1";
  1293. break;
  1294. case PCI_ERS_RESULT_DISCONNECT:
  1295. envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
  1296. envp[idx++] = "DEVICE_ONLINE=0";
  1297. break;
  1298. default:
  1299. break;
  1300. }
  1301. if (idx > 0) {
  1302. envp[idx++] = NULL;
  1303. kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
  1304. }
  1305. }
  1306. #endif
  1307. static int pci_bus_num_vf(struct device *dev)
  1308. {
  1309. return pci_num_vf(to_pci_dev(dev));
  1310. }
  1311. /**
  1312. * pci_dma_configure - Setup DMA configuration
  1313. * @dev: ptr to dev structure
  1314. *
  1315. * Function to update a PCI device's DMA configuration using the same
  1316. * info from the OF node or ACPI node of the host bridge's parent (if any).
  1317. */
  1318. static int pci_dma_configure(struct device *dev)
  1319. {
  1320. struct device *bridge;
  1321. int ret = 0;
  1322. bridge = pci_get_host_bridge_device(to_pci_dev(dev));
  1323. if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
  1324. bridge->parent->of_node) {
  1325. ret = of_dma_configure(dev, bridge->parent->of_node, true);
  1326. } else if (has_acpi_companion(bridge)) {
  1327. struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
  1328. enum dev_dma_attr attr = acpi_get_dma_attr(adev);
  1329. if (attr != DEV_DMA_NOT_SUPPORTED)
  1330. ret = acpi_dma_configure(dev, attr);
  1331. }
  1332. pci_put_host_bridge_device(bridge);
  1333. return ret;
  1334. }
  1335. struct bus_type pci_bus_type = {
  1336. .name = "pci",
  1337. .match = pci_bus_match,
  1338. .uevent = pci_uevent,
  1339. .probe = pci_device_probe,
  1340. .remove = pci_device_remove,
  1341. .shutdown = pci_device_shutdown,
  1342. .dev_groups = pci_dev_groups,
  1343. .bus_groups = pci_bus_groups,
  1344. .drv_groups = pci_drv_groups,
  1345. .pm = PCI_PM_OPS_PTR,
  1346. .num_vf = pci_bus_num_vf,
  1347. .dma_configure = pci_dma_configure,
  1348. };
  1349. EXPORT_SYMBOL(pci_bus_type);
  1350. #ifdef CONFIG_PCIEPORTBUS
  1351. static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
  1352. {
  1353. struct pcie_device *pciedev;
  1354. struct pcie_port_service_driver *driver;
  1355. if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
  1356. return 0;
  1357. pciedev = to_pcie_device(dev);
  1358. driver = to_service_driver(drv);
  1359. if (driver->service != pciedev->service)
  1360. return 0;
  1361. if (driver->port_type != PCIE_ANY_PORT &&
  1362. driver->port_type != pci_pcie_type(pciedev->port))
  1363. return 0;
  1364. return 1;
  1365. }
  1366. struct bus_type pcie_port_bus_type = {
  1367. .name = "pci_express",
  1368. .match = pcie_port_bus_match,
  1369. };
  1370. EXPORT_SYMBOL_GPL(pcie_port_bus_type);
  1371. #endif
  1372. static int __init pci_driver_init(void)
  1373. {
  1374. int ret;
  1375. ret = bus_register(&pci_bus_type);
  1376. if (ret)
  1377. return ret;
  1378. #ifdef CONFIG_PCIEPORTBUS
  1379. ret = bus_register(&pcie_port_bus_type);
  1380. if (ret)
  1381. return ret;
  1382. #endif
  1383. dma_debug_add_bus(&pci_bus_type);
  1384. return 0;
  1385. }
  1386. postcore_initcall(pci_driver_init);