/* drivers/xen/pci.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2009, Intel Corporation.
  4. *
  5. * Author: Weidong Han <weidong.han@intel.com>
  6. */
  7. #include <linux/pci.h>
  8. #include <linux/acpi.h>
  9. #include <linux/pci-acpi.h>
  10. #include <xen/pci.h>
  11. #include <xen/xen.h>
  12. #include <xen/interface/physdev.h>
  13. #include <xen/interface/xen.h>
  14. #include <asm/xen/hypervisor.h>
  15. #include <asm/xen/hypercall.h>
  16. #include "../pci/pci.h"
  17. #ifdef CONFIG_PCI_MMCONFIG
  18. #include <asm/pci_x86.h>
  19. static int xen_mcfg_late(void);
  20. #endif
/*
 * True while the hypervisor is believed to support the segment-aware
 * PHYSDEVOP_pci_device_add interface; cleared permanently the first time
 * that hypercall returns -ENOSYS, after which the legacy
 * PHYSDEVOP_manage_pci* calls are used instead.
 */
static bool __read_mostly pci_seg_supported = true;
/*
 * Register a newly discovered PCI device with the Xen hypervisor so it can
 * later be used for passthrough and MSI/MSI-X setup.
 *
 * Tries the segment-aware PHYSDEVOP_pci_device_add hypercall first and falls
 * back to the legacy PHYSDEVOP_manage_pci* hypercalls when the hypervisor
 * reports -ENOSYS for it.
 *
 * Returns 0 on success (or when the device cannot be represented to Xen at
 * all, i.e. segment >= 0x10000), otherwise the hypercall's negative error.
 */
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation due to this being
	 * potentially called from inside of acpi_init immediately after
	 * MCFG table has been finally parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif

	if (pci_domain_nr(pci_dev->bus) >> 16) {
		/*
		 * The hypercall interface is limited to 16bit PCI segment
		 * values, do not attempt to register devices with Xen in
		 * segments greater or equal than 0x10000.
		 */
		dev_info(dev,
			 "not registering with Xen: invalid PCI segment\n");
		return 0;
	}

	if (pci_seg_supported) {
		/* optarr[] carries one optional entry: the _PXM proximity domain. */
		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);

		add->seg = pci_domain_nr(pci_dev->bus);
		add->bus = pci_dev->bus->number;
		add->devfn = pci_dev->devfn;

#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			/* Virtual functions are reported relative to their PF. */
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		/* Non-zero slot bits on an ARI bus are extended function bits. */
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		/* VFs often have no ACPI node of their own; try the PF's bridge. */
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space at
			 * all. Try to get acpi handle of parent pci bus.
			 */
			struct pci_bus *pbus;
			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			/* Walk up the ACPI namespace until a _PXM method answers. */
			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		/* Old hypervisor: latch the fallback and use legacy ops below. */
		pci_seg_supported = false;
	}

	/* Legacy interface cannot express non-zero PCI segments. */
	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}
  139. static int xen_remove_device(struct device *dev)
  140. {
  141. int r;
  142. struct pci_dev *pci_dev = to_pci_dev(dev);
  143. if (pci_domain_nr(pci_dev->bus) >> 16) {
  144. /*
  145. * The hypercall interface is limited to 16bit PCI segment
  146. * values.
  147. */
  148. dev_info(dev,
  149. "not unregistering with Xen: invalid PCI segment\n");
  150. return 0;
  151. }
  152. if (pci_seg_supported) {
  153. struct physdev_pci_device device = {
  154. .seg = pci_domain_nr(pci_dev->bus),
  155. .bus = pci_dev->bus->number,
  156. .devfn = pci_dev->devfn
  157. };
  158. r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
  159. &device);
  160. } else if (pci_domain_nr(pci_dev->bus))
  161. r = -ENOSYS;
  162. else {
  163. struct physdev_manage_pci manage_pci = {
  164. .bus = pci_dev->bus->number,
  165. .devfn = pci_dev->devfn
  166. };
  167. r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
  168. &manage_pci);
  169. }
  170. return r;
  171. }
  172. int xen_reset_device(const struct pci_dev *dev)
  173. {
  174. struct pci_device_reset device = {
  175. .dev.seg = pci_domain_nr(dev->bus),
  176. .dev.bus = dev->bus->number,
  177. .dev.devfn = dev->devfn,
  178. .flags = PCI_DEVICE_RESET_FLR,
  179. };
  180. if (pci_domain_nr(dev->bus) >> 16) {
  181. /*
  182. * The hypercall interface is limited to 16bit PCI segment
  183. * values.
  184. */
  185. dev_info(&dev->dev,
  186. "unable to notify Xen of device reset: invalid PCI segment\n");
  187. return 0;
  188. }
  189. return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
  190. }
  191. EXPORT_SYMBOL_GPL(xen_reset_device);
  192. static int xen_pci_notifier(struct notifier_block *nb,
  193. unsigned long action, void *data)
  194. {
  195. struct device *dev = data;
  196. int r = 0;
  197. switch (action) {
  198. case BUS_NOTIFY_ADD_DEVICE:
  199. r = xen_add_device(dev);
  200. break;
  201. case BUS_NOTIFY_DEL_DEVICE:
  202. r = xen_remove_device(dev);
  203. break;
  204. default:
  205. return NOTIFY_DONE;
  206. }
  207. if (r)
  208. dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
  209. action == BUS_NOTIFY_ADD_DEVICE ? "add" :
  210. (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
  211. return NOTIFY_OK;
  212. }
  213. static struct notifier_block device_nb = {
  214. .notifier_call = xen_pci_notifier,
  215. };
  216. static int __init register_xen_pci_notifier(void)
  217. {
  218. if (!xen_initial_domain())
  219. return 0;
  220. return bus_register_notifier(&pci_bus_type, &device_nb);
  221. }
  222. arch_initcall(register_xen_pci_notifier);
  223. #ifdef CONFIG_PCI_MMCONFIG
  224. static int xen_mcfg_late(void)
  225. {
  226. struct pci_mmcfg_region *cfg;
  227. int rc;
  228. if (!xen_initial_domain())
  229. return 0;
  230. if ((pci_probe & PCI_PROBE_MMCONF) == 0)
  231. return 0;
  232. if (list_empty(&pci_mmcfg_list))
  233. return 0;
  234. /* Check whether they are in the right area. */
  235. list_for_each_entry(cfg, &pci_mmcfg_list, list) {
  236. struct physdev_pci_mmcfg_reserved r;
  237. r.address = cfg->address;
  238. r.segment = cfg->segment;
  239. r.start_bus = cfg->start_bus;
  240. r.end_bus = cfg->end_bus;
  241. r.flags = XEN_PCI_MMCFG_RESERVED;
  242. rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
  243. switch (rc) {
  244. case 0:
  245. case -ENOSYS:
  246. continue;
  247. default:
  248. pr_warn("Failed to report MMCONFIG reservation"
  249. " state for %s to hypervisor"
  250. " (%d)\n",
  251. cfg->name, rc);
  252. }
  253. }
  254. return 0;
  255. }
  256. #endif
  257. #ifdef CONFIG_XEN_DOM0
/* Record tying a PCI device to the domain that currently owns it. */
struct xen_device_domain_owner {
	domid_t domain;		/* id of the owning domain */
	struct pci_dev *dev;	/* the owned device */
	struct list_head list;	/* link in dev_domain_list */
};

/* Protects dev_domain_list against concurrent lookup/add/remove. */
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);
/*
 * Look up the ownership record for @dev on dev_domain_list.
 * Caller must hold dev_domain_list_spinlock.
 * Returns the matching entry, or NULL if @dev has no registered owner.
 */
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}
	return NULL;
}
  274. int xen_find_device_domain_owner(struct pci_dev *dev)
  275. {
  276. struct xen_device_domain_owner *owner;
  277. int domain = -ENODEV;
  278. spin_lock(&dev_domain_list_spinlock);
  279. owner = find_device(dev);
  280. if (owner)
  281. domain = owner->domain;
  282. spin_unlock(&dev_domain_list_spinlock);
  283. return domain;
  284. }
  285. EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
  286. int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
  287. {
  288. struct xen_device_domain_owner *owner;
  289. owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
  290. if (!owner)
  291. return -ENODEV;
  292. spin_lock(&dev_domain_list_spinlock);
  293. if (find_device(dev)) {
  294. spin_unlock(&dev_domain_list_spinlock);
  295. kfree(owner);
  296. return -EEXIST;
  297. }
  298. owner->domain = domain;
  299. owner->dev = dev;
  300. list_add_tail(&owner->list, &dev_domain_list);
  301. spin_unlock(&dev_domain_list_spinlock);
  302. return 0;
  303. }
  304. EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
  305. int xen_unregister_device_domain_owner(struct pci_dev *dev)
  306. {
  307. struct xen_device_domain_owner *owner;
  308. spin_lock(&dev_domain_list_spinlock);
  309. owner = find_device(dev);
  310. if (!owner) {
  311. spin_unlock(&dev_domain_list_spinlock);
  312. return -ENODEV;
  313. }
  314. list_del(&owner->list);
  315. spin_unlock(&dev_domain_list_spinlock);
  316. kfree(owner);
  317. return 0;
  318. }
  319. EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
  320. #endif