// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * Author: Weidong Han <weidong.han@intel.com>
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/pci.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"

#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>
static int xen_mcfg_late(void);
#endif

static bool __read_mostly pci_seg_supported = true;
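
/*
 * Tell the hypervisor about a newly discovered PCI device, so that device
 * passthrough and MSI/MSI-X setup keep working.  Segment-aware hypervisors
 * take PHYSDEVOP_pci_device_add; if that returns -ENOSYS, fall back for
 * good to the legacy, segment-0-only PHYSDEVOP_manage_pci_add* calls.
 */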
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation, because this can be
	 * called from inside acpi_init(), immediately after the MCFG table
	 * has been parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif

	if (pci_seg_supported) {
		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

		add->seg = pci_domain_nr(pci_dev->bus);
		add->bus = pci_dev->bus->number;
		add->devfn = pci_dev->devfn;

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space
			 * at all. Try to get the ACPI handle of a parent PCI
			 * bus instead.
			 */
			struct pci_bus *pbus;

			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}
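
/*
 * Tell the hypervisor that a PCI device has gone away, preferring the
 * segment-aware PHYSDEVOP_pci_device_remove when the hypervisor supports it.
 */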
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}
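
/* Ask Xen to perform a Function Level Reset (FLR) of @dev. */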
int xen_reset_device(const struct pci_dev *dev)
{
	struct pci_device_reset device = {
		.dev.seg = pci_domain_nr(dev->bus),
		.dev.bus = dev->bus->number,
		.dev.devfn = dev->devfn,
		.flags = PCI_DEVICE_RESET_FLR,
	};

	return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
}
EXPORT_SYMBOL_GPL(xen_reset_device);
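
/*
 * Bus notifier forwarding device add/remove events on the PCI bus to Xen.
 * Failures are only logged: dom0 can keep using the device, but later
 * passthrough or MSI/MSI-X setup for it may fail.
 */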
static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));

	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};

static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);
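
/*
 * Report every MMCONFIG (ECAM) region from the MCFG table to the hypervisor
 * so that it knows the area is reserved.  -ENOSYS from hypervisors that lack
 * PHYSDEVOP_pci_mmcfg_reserved is tolerated silently.
 */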
#ifdef CONFIG_PCI_MMCONFIG
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}

	return 0;
}
#endif
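
/*
 * Bookkeeping of which domain a PCI device has been assigned to, used when
 * devices are handed out to other domains (e.g. by xen-pciback).
 */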
#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
	domid_t domain;
	struct pci_dev *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);
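
/* Caller must hold dev_domain_list_spinlock. */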
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}
	return NULL;
}
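
/* Return the ID of the domain owning @dev, or -ENODEV if none is recorded. */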
int xen_find_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;
	int domain = -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (owner)
		domain = owner->domain;
	spin_unlock(&dev_domain_list_spinlock);
	return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
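
/* Record @domain as the owner of @dev; -EEXIST if @dev already has an owner. */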
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
	if (!owner)
		return -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
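
/* Drop and free the ownership record for @dev, if one exists. */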
int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (!owner) {
		spin_unlock(&dev_domain_list_spinlock);
		return -ENODEV;
	}
	list_del(&owner->list);
	spin_unlock(&dev_domain_list_spinlock);
	kfree(owner);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif