// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express I/O Virtualization (IOV) support
 *   Single Root IOV 1.0
 *   Address Translation Service 1.0
 *
 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include "pci.h"

#define VIRTFN_ID_LEN	17	/* "virtfn%u\0" for 2^32 - 1 */

int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
{
	if (!dev->is_physfn)
		return -EINVAL;
	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
				    dev->sriov->stride * vf_id) >> 8);
}

int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
{
	if (!dev->is_physfn)
		return -EINVAL;
	return (dev->devfn + dev->sriov->offset +
		dev->sriov->stride * vf_id) & 0xff;
}
EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn);

int pci_iov_vf_id(struct pci_dev *dev)
{
	struct pci_dev *pf;

	if (!dev->is_virtfn)
		return -EINVAL;

	pf = pci_physfn(dev);
	return (pci_dev_id(dev) - (pci_dev_id(pf) + pf->sriov->offset)) /
	       pf->sriov->stride;
}
EXPORT_SYMBOL_GPL(pci_iov_vf_id);
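
/*
 * Worked example (illustrative only, the numbers are hypothetical): with
 * the PF at 00:03.0 (devfn 0x18), First VF Offset 0x1 and VF Stride 0x1,
 * VF 2 has routing ID 0x18 + 0x1 + 0x1 * 2 = 0x1b, i.e. bus 0x00 and
 * devfn 0x1b (device 3, function 3).  A carry out of the low byte would
 * place the VF on a higher bus number, which is why pci_iov_virtfn_bus()
 * adds the high byte of that sum to dev->bus->number.
 */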

/**
 * pci_iov_get_pf_drvdata - Return the drvdata of a PF
 * @dev: VF pci_dev
 * @pf_driver: Device driver required to own the PF
 *
 * This must be called from a context that ensures that a VF driver is attached.
 * The value returned is invalid once the VF driver completes its remove()
 * callback.
 *
 * Locking is achieved by the driver core. A VF driver cannot be probed until
 * pci_enable_sriov() is called and pci_disable_sriov() does not return until
 * all VF drivers have completed their remove().
 *
 * The PF driver must call pci_disable_sriov() before it begins to destroy the
 * drvdata.
 */
void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver)
{
	struct pci_dev *pf_dev;

	if (!dev->is_virtfn)
		return ERR_PTR(-EINVAL);
	pf_dev = dev->physfn;
	if (pf_dev->driver != pf_driver)
		return ERR_PTR(-EINVAL);
	return pci_get_drvdata(pf_dev);
}
EXPORT_SYMBOL_GPL(pci_iov_get_pf_drvdata);
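
/*
 * Illustrative sketch, not part of the original file: a VF driver that
 * needs state owned by its PF driver could use the helper above from its
 * probe() path roughly like this ("my_pf_driver", "my_pf_state" and
 * "vf_pdev" are hypothetical names):
 *
 *	struct my_pf_state *pf_state;
 *
 *	pf_state = pci_iov_get_pf_drvdata(vf_pdev, &my_pf_driver);
 *	if (IS_ERR(pf_state))
 *		return PTR_ERR(pf_state);
 */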

/*
 * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
 * change when NumVFs changes.
 *
 * Update iov->offset and iov->stride when NumVFs is written.
 */
static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
{
	struct pci_sriov *iov = dev->sriov;

	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
}

/*
 * The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
 * determine how many additional bus numbers will be consumed by VFs.
 *
 * Iterate over all valid NumVFs, validate offset and stride, and calculate
 * the maximum number of bus numbers that could ever be required.
 */
static int compute_max_vf_buses(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;
	int nr_virtfn, busnr, rc = 0;

	for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
		pci_iov_set_numvfs(dev, nr_virtfn);
		if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
			rc = -EIO;
			goto out;
		}

		busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
		if (busnr > iov->max_VF_buses)
			iov->max_VF_buses = busnr;
	}

out:
	pci_iov_set_numvfs(dev, 0);
	return rc;
}

static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
	struct pci_bus *child;

	if (bus->number == busnr)
		return bus;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	if (child)
		return child;

	child = pci_add_new_bus(bus, NULL, busnr);
	if (!child)
		return NULL;

	pci_bus_insert_busn_res(child, busnr, busnr);
	return child;
}

static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
{
	if (physbus != virtbus && list_empty(&virtbus->devices))
		pci_remove_bus(virtbus);
}

resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
}

static void pci_read_vf_config_common(struct pci_dev *virtfn)
{
	struct pci_dev *physfn = virtfn->physfn;

	/*
	 * Some config registers are the same across all associated VFs.
	 * Read them once from VF0 so we can skip reading them from the
	 * other VFs.
	 *
	 * PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to
	 * have the same Revision ID and Subsystem ID, but we assume they
	 * do.
	 */
	pci_read_config_dword(virtfn, PCI_CLASS_REVISION,
			      &physfn->sriov->class);
	pci_read_config_byte(virtfn, PCI_HEADER_TYPE,
			     &physfn->sriov->hdr_type);
	pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID,
			     &physfn->sriov->subsystem_vendor);
	pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
			     &physfn->sriov->subsystem_device);
}

int pci_iov_sysfs_link(struct pci_dev *dev,
		       struct pci_dev *virtfn, int id)
{
	char buf[VIRTFN_ID_LEN];
	int rc;

	sprintf(buf, "virtfn%u", id);
	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
	if (rc)
		goto failed;
	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
	if (rc)
		goto failed1;

	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

	return 0;

failed1:
	sysfs_remove_link(&dev->dev.kobj, buf);
failed:
	return rc;
}

#ifdef CONFIG_PCI_MSI
static ssize_t sriov_vf_total_msix_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u32 vf_total_msix = 0;

	device_lock(dev);
	if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
		goto unlock;

	vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
unlock:
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", vf_total_msix);
}
static DEVICE_ATTR_RO(sriov_vf_total_msix);

static ssize_t sriov_vf_msix_count_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct pci_dev *vf_dev = to_pci_dev(dev);
	struct pci_dev *pdev = pci_physfn(vf_dev);
	int val, ret = 0;

	if (kstrtoint(buf, 0, &val) < 0)
		return -EINVAL;

	if (val < 0)
		return -EINVAL;

	device_lock(&pdev->dev);
	if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
		ret = -EOPNOTSUPP;
		goto err_pdev;
	}

	device_lock(&vf_dev->dev);
	if (vf_dev->driver) {
		/*
		 * A driver is already attached to this VF and has configured
		 * itself based on the current MSI-X vector count. Changing
		 * the vector size could mess up the driver, so block it.
		 */
		ret = -EBUSY;
		goto err_dev;
	}

	ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);

err_dev:
	device_unlock(&vf_dev->dev);
err_pdev:
	device_unlock(&pdev->dev);
	return ret ? : count;
}
static DEVICE_ATTR_WO(sriov_vf_msix_count);
#endif
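
/*
 * Illustrative note, not part of the original file: the two attributes
 * above are exposed through sysfs, so an administrator could read the
 * vector budget from the PF and assign a per-VF count before binding a
 * VF driver (the device addresses below are hypothetical):
 *
 *	cat /sys/bus/pci/devices/0000:03:00.0/sriov_vf_total_msix
 *	echo 8 > /sys/bus/pci/devices/0000:03:00.1/sriov_vf_msix_count
 */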

static struct attribute *sriov_vf_dev_attrs[] = {
#ifdef CONFIG_PCI_MSI
	&dev_attr_sriov_vf_msix_count.attr,
#endif
	NULL,
};

static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (!pdev->is_virtfn)
		return 0;

	return a->mode;
}

const struct attribute_group sriov_vf_dev_attr_group = {
	.attrs = sriov_vf_dev_attrs,
	.is_visible = sriov_vf_attrs_are_visible,
};

int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
	int i;
	int rc = -ENOMEM;
	u64 size;
	struct pci_dev *virtfn;
	struct resource *res;
	struct pci_sriov *iov = dev->sriov;
	struct pci_bus *bus;

	bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
	if (!bus)
		goto failed;

	virtfn = pci_alloc_dev(bus);
	if (!virtfn)
		goto failed0;

	virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	virtfn->device = iov->vf_device;
	virtfn->is_virtfn = 1;
	virtfn->physfn = pci_dev_get(dev);
	virtfn->no_command_memory = 1;

	if (id == 0)
		pci_read_vf_config_common(virtfn);

	rc = pci_setup_device(virtfn);
	if (rc)
		goto failed1;

	virtfn->dev.parent = dev->dev.parent;
	virtfn->multifunction = 0;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		virtfn->resource[i].start = res->start + size * id;
		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
		rc = request_resource(res, &virtfn->resource[i]);
		BUG_ON(rc);
	}

	pci_device_add(virtfn, virtfn->bus);
	rc = pci_iov_sysfs_link(dev, virtfn, id);
	if (rc)
		goto failed1;

	pci_bus_add_device(virtfn);

	return 0;

failed1:
	pci_stop_and_remove_bus_device(virtfn);
	pci_dev_put(dev);
failed0:
	virtfn_remove_bus(dev->bus, bus);
failed:
	return rc;
}

void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;

	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
					     pci_iov_virtfn_bus(dev, id),
					     pci_iov_virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	/*
	 * pci_stop_dev() could have been called for this virtfn already,
	 * so the directory for the virtfn may have been removed before.
	 * Double check to avoid spurious sysfs warnings.
	 */
	if (virtfn->dev.kobj.sd)
		sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn->bus);

	/* balance pci_get_domain_bus_and_slot() */
	pci_dev_put(virtfn);
	pci_dev_put(dev);
}

static ssize_t sriov_totalvfs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}

static ssize_t sriov_numvfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 num_vfs;

	/* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
	device_lock(&pdev->dev);
	num_vfs = pdev->sriov->num_VFs;
	device_unlock(&pdev->dev);

	return sysfs_emit(buf, "%u\n", num_vfs);
}

/*
 * num_vfs > 0; number of VFs to enable
 * num_vfs = 0; disable all VFs
 *
 * Note: SRIOV spec does not allow partial VF
 *       disable, so it's all or none.
 */
static ssize_t sriov_numvfs_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret = 0;
	u16 num_vfs;

	if (kstrtou16(buf, 0, &num_vfs) < 0)
		return -EINVAL;

	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -ERANGE;

	device_lock(&pdev->dev);

	if (num_vfs == pdev->sriov->num_VFs)
		goto exit;

	/* is PF driver loaded */
	if (!pdev->driver) {
		pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
		ret = -ENOENT;
		goto exit;
	}

	/* is PF driver loaded w/callback */
	if (!pdev->driver->sriov_configure) {
		pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
		ret = -ENOENT;
		goto exit;
	}

	if (num_vfs == 0) {
		/* disable VFs */
		ret = pdev->driver->sriov_configure(pdev, 0);
		goto exit;
	}

	/* enable VFs */
	if (pdev->sriov->num_VFs) {
		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
			 pdev->sriov->num_VFs, num_vfs);
		ret = -EBUSY;
		goto exit;
	}

	ret = pdev->driver->sriov_configure(pdev, num_vfs);
	if (ret < 0)
		goto exit;

	if (ret != num_vfs)
		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
			 num_vfs, ret);

exit:
	device_unlock(&pdev->dev);

	if (ret < 0)
		return ret;

	return count;
}
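
/*
 * Illustrative note, not part of the original file: from user space this
 * attribute is driven with plain writes, e.g. (hypothetical PF address):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *
 * Because partial disable is not allowed, the count must be brought back
 * to 0 before a different number of VFs can be enabled.
 */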

static ssize_t sriov_offset_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pdev->sriov->offset);
}

static ssize_t sriov_stride_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pdev->sriov->stride);
}

static ssize_t sriov_vf_device_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%x\n", pdev->sriov->vf_device);
}

static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pdev->sriov->drivers_autoprobe);
}

static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool drivers_autoprobe;

	if (kstrtobool(buf, &drivers_autoprobe) < 0)
		return -EINVAL;

	pdev->sriov->drivers_autoprobe = drivers_autoprobe;

	return count;
}

static DEVICE_ATTR_RO(sriov_totalvfs);
static DEVICE_ATTR_RW(sriov_numvfs);
static DEVICE_ATTR_RO(sriov_offset);
static DEVICE_ATTR_RO(sriov_stride);
static DEVICE_ATTR_RO(sriov_vf_device);
static DEVICE_ATTR_RW(sriov_drivers_autoprobe);

static struct attribute *sriov_pf_dev_attrs[] = {
	&dev_attr_sriov_totalvfs.attr,
	&dev_attr_sriov_numvfs.attr,
	&dev_attr_sriov_offset.attr,
	&dev_attr_sriov_stride.attr,
	&dev_attr_sriov_vf_device.attr,
	&dev_attr_sriov_drivers_autoprobe.attr,
#ifdef CONFIG_PCI_MSI
	&dev_attr_sriov_vf_total_msix.attr,
#endif
	NULL,
};

static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (!dev_is_pf(dev))
		return 0;

	return a->mode;
}

const struct attribute_group sriov_pf_dev_attr_group = {
	.attrs = sriov_pf_dev_attrs,
	.is_visible = sriov_pf_attrs_are_visible,
};

int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	return 0;
}

int __weak pcibios_sriov_disable(struct pci_dev *pdev)
{
	return 0;
}

static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
{
	unsigned int i;
	int rc;

	if (dev->no_vf_scan)
		return 0;

	for (i = 0; i < num_vfs; i++) {
		rc = pci_iov_add_virtfn(dev, i);
		if (rc)
			goto failed;
	}

	return 0;

failed:
	while (i--)
		pci_iov_remove_virtfn(dev, i);

	return rc;
}

static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i;
	int nres;
	u16 initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;
	int bus;

	if (!nr_virtfn)
		return 0;

	if (iov->num_VFs)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		bars |= (1 << (i + PCI_IOV_RESOURCES));
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		pci_err(dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
	if (bus > dev->bus->busn_res.end) {
		pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
			nr_virtfn, bus, &dev->bus->busn_res);
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		pci_err(dev, "SR-IOV: IOV BARS not allocated\n");
		return -ENOMEM;
	}

	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		if (!pdev->is_physfn) {
			pci_dev_put(pdev);
			return -ENOSYS;
		}

		rc = sysfs_create_link(&dev->dev.kobj,
				       &pdev->dev.kobj, "dep_link");
		pci_dev_put(pdev);
		if (rc)
			return rc;
	}

	iov->initial_VFs = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	rc = pcibios_sriov_enable(dev, initial);
	if (rc) {
		pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc);
		goto err_pcibios;
	}

	pci_iov_set_numvfs(dev, nr_virtfn);
	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);
	pci_cfg_access_unlock(dev);

	rc = sriov_add_vfs(dev, initial);
	if (rc)
		goto err_pcibios;

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

err_pcibios:
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	pcibios_sriov_disable(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	pci_iov_set_numvfs(dev, 0);
	return rc;
}

static void sriov_del_vfs(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;
	int i;

	for (i = 0; i < iov->num_VFs; i++)
		pci_iov_remove_virtfn(dev, i);
}

static void sriov_disable(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	sriov_del_vfs(dev);
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	pcibios_sriov_disable(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->num_VFs = 0;
	pci_iov_set_numvfs(dev, 0);
}

static int sriov_init(struct pci_dev *dev, int pos)
{
	int i, bar64;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total;
	struct pci_sriov *iov;
	struct resource *res;
	const char *res_name;
	struct pci_dev *pdev;

	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES);

		/*
		 * If it is already FIXED, don't change it, something
		 * (perhaps EA or header fixups) wants it this way.
		 */
		if (res->flags & IORESOURCE_PCI_FIXED)
			bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
		else
			bar64 = __pci_read_base(dev, pci_bar_unknown, res,
						pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		iov->barsz[i] = resource_size(res);
		res->end = res->start + resource_size(res) * total - 1;
		pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
			 res_name, res, i, total);
		i += bar64;
		nres++;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total_VFs = total;
	iov->driver_max_VFs = total;
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
	iov->pgsz = pgsz;
	iov->self = dev;
	iov->drivers_autoprobe = true;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	dev->sriov = iov;
	dev->is_physfn = 1;
	rc = compute_max_vf_buses(dev);
	if (rc)
		goto fail_max_buses;

	return 0;

fail_max_buses:
	dev->sriov = NULL;
	dev->is_physfn = 0;
failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		res->flags = 0;
	}

	kfree(iov);
	return rc;
}

static void sriov_release(struct pci_dev *dev)
{
	BUG_ON(dev->sriov->num_VFs);

	if (dev != dev->sriov->dev)
		pci_dev_put(dev->sriov->dev);

	kfree(dev->sriov);
	dev->sriov = NULL;
}

static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	/*
	 * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
	 * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
	 */
	ctrl &= ~PCI_SRIOV_CTRL_ARI;
	ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		pci_update_resource(dev, i + PCI_IOV_RESOURCES);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_iov_set_numvfs(dev, iov->num_VFs);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}

/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
	int pos;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		return sriov_init(dev, pos);

	return -ENODEV;
}

/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_release(dev);
}

/**
 * pci_iov_remove - clean up SR-IOV state after PF driver is detached
 * @dev: the PCI device
 */
void pci_iov_remove(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	if (!dev->is_physfn)
		return;

	iov->driver_max_VFs = iov->total_VFs;
	if (iov->num_VFs)
		pci_warn(dev, "driver left SR-IOV enabled after remove\n");
}

/**
 * pci_iov_update_resource - update a VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Update a VF BAR in the SR-IOV capability of a PF.
 */
void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
	struct resource *res = dev->resource + resno;
	int vf_bar = resno - PCI_IOV_RESOURCES;
	struct pci_bus_region region;
	u16 cmd;
	u32 new;
	int reg;

	/*
	 * The generic pci_restore_bars() path calls this for all devices,
	 * including VFs and non-SR-IOV devices. If this is not a PF, we
	 * have nothing to do.
	 */
	if (!iov)
		return;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
			 vf_bar, res);
		return;
	}

	/*
	 * Ignore unimplemented BARs, unused resource slots for 64-bit
	 * BARs, and non-movable resources, e.g., those described via
	 * Enhanced Allocation.
	 */
	if (!res->flags)
		return;

	if (res->flags & IORESOURCE_UNSET)
		return;

	if (res->flags & IORESOURCE_PCI_FIXED)
		return;

	pcibios_resource_to_bus(dev->bus, &region, res);
	new = region.start;
	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;

	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
	pci_write_config_dword(dev, reg, new);
	if (res->flags & IORESOURCE_MEM_64) {
		new = region.start >> 16 >> 16;
		pci_write_config_dword(dev, reg + 4, new);
	}
}

resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
						      int resno)
{
	return pci_iov_resource_size(dev, resno);
}

/**
 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 * This is not the same as the resource size which is defined as
 * the VF BAR size multiplied by the number of VFs. The alignment
 * is just the VF BAR size.
 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
	return pcibios_iov_resource_alignment(dev, resno);
}
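
/*
 * Worked example (illustrative only): a PF whose VF BAR0 is 16 KB and
 * whose TotalVFs is 8 ends up with a 16 KB * 8 = 128 KB IOV resource on
 * the PF, yet that window only needs to be aligned to the single-VF BAR
 * size of 16 KB, which is what pci_sriov_resource_alignment() reports by
 * default (via pci_iov_resource_size()).
 */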

/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_restore_state(dev);
}

/**
 * pci_vf_drivers_autoprobe - set PF property drivers_autoprobe for VFs
 * @dev: the PCI device
 * @auto_probe: set VF drivers auto probe flag
 */
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool auto_probe)
{
	if (dev->is_physfn)
		dev->sriov->drivers_autoprobe = auto_probe;
}

/**
 * pci_iov_bus_range - find bus range used by Virtual Function
 * @bus: the PCI bus
 *
 * Returns max number of buses (exclude current one) used by Virtual
 * Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
	int max = 0;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!dev->is_physfn)
			continue;
		if (dev->sriov->max_VF_buses > max)
			max = dev->sriov->max_VF_buses;
	}

	return max ? max - bus->number : 0;
}

/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	might_sleep();

	if (!dev->is_physfn)
		return -ENOSYS;

	return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);

/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
	might_sleep();

	if (!dev->is_physfn)
		return;

	sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);

/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->num_VFs;
}
EXPORT_SYMBOL_GPL(pci_num_vf);

/**
 * pci_vfs_assigned - returns number of VFs that are assigned to a guest
 * @dev: the PCI device
 *
 * Returns number of VFs belonging to this device that are assigned to a guest.
 * If device is not a physical function returns 0.
 */
int pci_vfs_assigned(struct pci_dev *dev)
{
	struct pci_dev *vfdev;
	unsigned int vfs_assigned = 0;
	unsigned short dev_id;

	/* only search if we are a PF */
	if (!dev->is_physfn)
		return 0;

	/*
	 * determine the device ID for the VFs, the vendor ID will be the
	 * same as the PF so there is no need to check for that one
	 */
	dev_id = dev->sriov->vf_device;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
	while (vfdev) {
		/*
		 * It is considered assigned if it is a virtual function with
		 * our dev as the physical function and the assigned bit is set
		 */
		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
		    pci_is_dev_assigned(vfdev))
			vfs_assigned++;

		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
	}

	return vfs_assigned;
}
EXPORT_SYMBOL_GPL(pci_vfs_assigned);

/**
 * pci_sriov_set_totalvfs - reduce the TotalVFs available
 * @dev: the PCI PF device
 * @numvfs: number that should be used for TotalVFs supported
 *
 * Should be called from PF driver's probe routine with
 * device's mutex held.
 *
 * Returns 0 if PF is an SR-IOV-capable device and the value of numvfs is
 * valid. If not a PF return -ENOSYS;
 * if numvfs is invalid return -EINVAL;
 * if VFs already enabled, return -EBUSY.
 */
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
	if (!dev->is_physfn)
		return -ENOSYS;

	if (numvfs > dev->sriov->total_VFs)
		return -EINVAL;

	/* Shouldn't change if VFs already enabled */
	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
		return -EBUSY;

	dev->sriov->driver_max_VFs = numvfs;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
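
/*
 * Illustrative sketch, not part of the original file: a PF driver that
 * supports fewer VFs than the hardware advertises could clamp the limit
 * from its probe() routine ("my_pdev" and MY_DRIVER_MAX_VFS are
 * hypothetical names):
 *
 *	err = pci_sriov_set_totalvfs(my_pdev, MY_DRIVER_MAX_VFS);
 *	if (err)
 *		return err;
 *
 * After this, sriov_totalvfs in sysfs reports the clamped value, since it
 * is backed by pci_sriov_get_totalvfs().
 */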

/**
 * pci_sriov_get_totalvfs - get total VFs supported on this device
 * @dev: the PCI PF device
 *
 * For a PCIe device with SRIOV support, return the PCIe
 * SRIOV capability value of TotalVFs or the value of driver_max_VFs
 * if the driver reduced it. Otherwise 0.
 */
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return 0;

	return dev->sriov->driver_max_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);

/**
 * pci_sriov_configure_simple - helper to configure SR-IOV
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable, 0 to disable
 *
 * Enable or disable SR-IOV for devices that don't require any PF setup
 * before enabling SR-IOV. Return value is negative on error, or number of
 * VFs allocated on success.
 */
int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
{
	int rc;

	might_sleep();

	if (!dev->is_physfn)
		return -ENODEV;

	if (pci_vfs_assigned(dev)) {
		pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
		return -EPERM;
	}

	if (nr_virtfn == 0) {
		sriov_disable(dev);
		return 0;
	}

	rc = sriov_enable(dev, nr_virtfn);
	if (rc < 0)
		return rc;

	return nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
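
/*
 * Illustrative sketch, not part of the original file: a PF driver with no
 * PF-side setup requirements can wire this helper straight into its
 * struct pci_driver so that writes to sriov_numvfs work out of the box
 * ("my_driver", "my_probe", "my_remove" and "my_id_table" are
 * hypothetical names):
 *
 *	static struct pci_driver my_driver = {
 *		.name            = "my_driver",
 *		.id_table        = my_id_table,
 *		.probe           = my_probe,
 *		.remove          = my_remove,
 *		.sriov_configure = pci_sriov_configure_simple,
 *	};
 */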