// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/pci.h>
#include <misc/cxl.h>
#include "cxl.h"

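/*
 * A cxl AFU can carry PCI-style configuration records. The virtual PHB
 * (vPHB) implemented below exposes each of those records as a function on a
 * virtual PCI bus, so that ordinary PCI drivers can bind to them.
 */
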
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
        return PCI_PROBE_NORMAL;
}

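/* MSIs are never used on the vPHB, so refuse any attempt to set them up. */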
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        return -ENODEV;
}

static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
        /*
         * MSI should never be set, but we still need to provide this
         * callback.
         */
}

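/*
 * Called via pci_controller_ops when a device on the vPHB is enabled: check
 * that the cxl link is up, then allocate a default context for the device.
 */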
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
        struct pci_controller *phb;
        struct cxl_afu *afu;
        struct cxl_context *ctx;

        phb = pci_bus_to_host(dev->bus);
        afu = (struct cxl_afu *)phb->private_data;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
                return false;
        }

        dev->dev.archdata.dma_offset = PAGE_OFFSET;

        /*
         * Allocate a context to do cxl things too. If we eventually do real
         * DMA ops, we'll need a default context to attach them to.
         */
        ctx = cxl_dev_context_init(dev);
        if (IS_ERR(ctx))
                return false;
        dev->dev.archdata.cxl_ctx = ctx;

        return (cxl_ops->afu_check_and_enable(afu) == 0);
}

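/*
 * Release the default context allocated in cxl_pci_enable_device_hook(),
 * unless a driver has already started it.
 */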
static void cxl_pci_disable_device(struct pci_dev *dev)
{
        struct cxl_context *ctx = cxl_get_context(dev);

        if (ctx) {
                if (ctx->status == STARTED) {
                        dev_err(&dev->dev, "Default context started\n");
                        return;
                }
                dev->dev.archdata.cxl_ctx = NULL;
                cxl_release_context(ctx);
        }
}

static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
        /* Should we do an AFU reset here? */
}

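/*
 * Map a bus/devfn on the vPHB to the index of the AFU configuration record
 * backing it.
 */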
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
        return (bus << 8) + devfn;
}

static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
{
        struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;

        return phb ? phb->private_data : NULL;
}

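/*
 * configured_state acts as a reader count on the AFU: config space accessors
 * take it with cxl_afu_configured_get() and drop it with
 * cxl_afu_configured_put(). A negative value marks the AFU as deconfigured,
 * in which case further config space access is refused.
 */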
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
        atomic_dec_if_positive(&afu->configured_state);
}

static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
        return atomic_inc_unless_negative(&afu->configured_state);
}

static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
                                       struct cxl_afu *afu, int *_record)
{
        int record;

        record = cxl_pcie_cfg_record(bus->number, devfn);
        if (record > afu->crs_num)
                return PCIBIOS_DEVICE_NOT_FOUND;
        *_record = record;

        return 0;
}

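/*
 * Config space reads and writes are serviced by the cxl backend, which
 * redirects them to the AFU configuration record selected by bus/devfn.
 */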
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
                                int offset, int len, u32 *val)
{
        int rc, record;
        struct cxl_afu *afu;
        u8 val8;
        u16 val16;
        u32 val32;

        afu = pci_bus_to_afu(bus);
        /* Grab a reader lock on afu. */
        if (afu == NULL || !cxl_afu_configured_get(afu))
                return PCIBIOS_DEVICE_NOT_FOUND;

        rc = cxl_pcie_config_info(bus, devfn, afu, &record);
        if (rc)
                goto out;

        switch (len) {
        case 1:
                rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
                *val = val8;
                break;
        case 2:
                rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
                *val = val16;
                break;
        case 4:
                rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
                *val = val32;
                break;
        default:
                WARN_ON(1);
        }

out:
        cxl_afu_configured_put(afu);
        return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}

static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
                                 int offset, int len, u32 val)
{
        int rc, record;
        struct cxl_afu *afu;

        afu = pci_bus_to_afu(bus);
        /* Grab a reader lock on afu. */
        if (afu == NULL || !cxl_afu_configured_get(afu))
                return PCIBIOS_DEVICE_NOT_FOUND;

        rc = cxl_pcie_config_info(bus, devfn, afu, &record);
        if (rc)
                goto out;

        switch (len) {
        case 1:
                rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
                break;
        case 2:
                rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
                break;
        case 4:
                rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
                break;
        default:
                WARN_ON(1);
        }

out:
        cxl_afu_configured_put(afu);
        return rc ? PCIBIOS_SET_FAILED : 0;
}

static struct pci_ops cxl_pcie_pci_ops =
{
        .read = cxl_pcie_read_config,
        .write = cxl_pcie_write_config,
};

static struct pci_controller_ops cxl_pci_controller_ops =
{
        .probe_mode = cxl_pci_probe_mode,
        .enable_device_hook = cxl_pci_enable_device_hook,
        .disable_device = cxl_pci_disable_device,
        .release_device = cxl_pci_disable_device,
        .reset_secondary_bus = cxl_pci_reset_secondary_bus,
        .setup_msi_irqs = cxl_setup_msi_irqs,
        .teardown_msi_irqs = cxl_teardown_msi_irqs,
};

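/*
 * Create a vPHB for the AFU, scan it and add the discovered functions to the
 * device model. Returns 0 on success (including when there is nothing to
 * expose) or a negative errno.
 */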
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
        struct pci_controller *phb;
        struct device_node *vphb_dn;
        struct device *parent;

        /*
         * If there are no AFU configuration records we won't have anything to
         * expose under the vPHB, so skip creating one, returning success since
         * this is still a valid case. This will also opt us out of EEH
         * handling since we won't have anything special to do if there are no
         * kernel drivers attached to the vPHB, and EEH handling is not yet
         * supported in the peer model.
         */
        if (!afu->crs_num)
                return 0;

        /* The parent device is the adapter. Reuse the device node of
         * the adapter.
         * We don't seem to care what device node is used for the vPHB,
         * but tools such as lsvpd walk up the device parents looking
         * for a valid location code, so we might as well show devices
         * attached to the adapter as being located on that adapter.
         */
        parent = afu->adapter->dev.parent;
        vphb_dn = parent->of_node;

        /* Alloc and setup PHB data structure */
        phb = pcibios_alloc_controller(vphb_dn);
        if (!phb)
                return -ENODEV;

        /* Setup parent in sysfs */
        phb->parent = parent;

        /* Setup the PHB using arch provided callback */
        phb->ops = &cxl_pcie_pci_ops;
        phb->cfg_addr = NULL;
        phb->cfg_data = NULL;
        phb->private_data = afu;
        phb->controller_ops = cxl_pci_controller_ops;

        /* Scan the bus */
        pcibios_scan_phb(phb);
        if (phb->bus == NULL)
                return -ENXIO;

        /* Set release hook on root bus */
        pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
                                    pcibios_free_controller_deferred,
                                    (void *) phb);

        /* Claim resources. This might need some rework as well depending
         * whether we are doing probe-only or not, like assigning unassigned
         * resources etc...
         */
        pcibios_claim_one_bus(phb->bus);

        /* Add probed PCI devices to the device model */
        pci_bus_add_devices(phb->bus);

        afu->phb = phb;

        return 0;
}

void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
        struct pci_controller *phb;

        /* If there is no configuration record we won't have one of these */
        if (!afu || !afu->phb)
                return;

        phb = afu->phb;
        afu->phb = NULL;

        pci_remove_root_bus(phb->bus);
        /*
         * We don't free phb here - that's handled by
         * pcibios_free_controller_deferred()
         */
}

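/* A device hangs off a vPHB if and only if its host bridge uses our config ops. */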
bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
        struct pci_controller *phb;

        phb = pci_bus_to_host(dev->bus);

        return (phb->ops == &cxl_pcie_pci_ops);
}

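/*
 * Exported helpers for cxl-aware drivers: map a struct pci_dev sitting on a
 * vPHB back to the AFU and configuration record behind it.
 */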
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
        struct pci_controller *phb;

        phb = pci_bus_to_host(dev->bus);

        return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);

unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
        return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);