/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/pci.h>
#include <misc/cxl.h>

#include "cxl.h"
  12. static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
  13. {
  14. if (dma_mask < DMA_BIT_MASK(64)) {
  15. pr_info("%s only 64bit DMA supported on CXL", __func__);
  16. return -EIO;
  17. }
  18. *(pdev->dev.dma_mask) = dma_mask;
  19. return 0;
  20. }
/* probe_mode callback: always probe the vPHB's bus the normal way. */
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}
/* MSI is not supported on a vPHB; fail any attempt to set it up. */
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set up (cxl_setup_msi_irqs always fails), but
	 * we still need to provide this callback.
	 */
}
  36. static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
  37. {
  38. struct pci_controller *phb;
  39. struct cxl_afu *afu;
  40. struct cxl_context *ctx;
  41. phb = pci_bus_to_host(dev->bus);
  42. afu = (struct cxl_afu *)phb->private_data;
  43. if (!cxl_ops->link_ok(afu->adapter, afu)) {
  44. dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
  45. return false;
  46. }
  47. set_dma_ops(&dev->dev, &dma_nommu_ops);
  48. set_dma_offset(&dev->dev, PAGE_OFFSET);
  49. /*
  50. * Allocate a context to do cxl things too. If we eventually do real
  51. * DMA ops, we'll need a default context to attach them to
  52. */
  53. ctx = cxl_dev_context_init(dev);
  54. if (IS_ERR(ctx))
  55. return false;
  56. dev->dev.archdata.cxl_ctx = ctx;
  57. return (cxl_ops->afu_check_and_enable(afu) == 0);
  58. }
  59. static void cxl_pci_disable_device(struct pci_dev *dev)
  60. {
  61. struct cxl_context *ctx = cxl_get_context(dev);
  62. if (ctx) {
  63. if (ctx->status == STARTED) {
  64. dev_err(&dev->dev, "Default context started\n");
  65. return;
  66. }
  67. dev->dev.archdata.cxl_ctx = NULL;
  68. cxl_release_context(ctx);
  69. }
  70. }
/* Window alignment of 1: no alignment constraint on vPHB MMIO windows. */
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}
/* Secondary bus reset callback — intentionally a no-op for now. */
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
  80. static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
  81. {
  82. return (bus << 8) + devfn;
  83. }
  84. static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
  85. {
  86. struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
  87. return phb ? phb->private_data : NULL;
  88. }
/*
 * Drop the "reader lock" taken by cxl_afu_configured_get().
 * dec_if_positive so a teardown marker (negative state) is left untouched.
 */
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}
/*
 * Take a "reader lock" on the AFU's configured state.
 * Fails (returns false) once configured_state has gone negative,
 * i.e. the AFU is no longer configured.
 */
static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
	return atomic_inc_unless_negative(&afu->configured_state);
}
  97. static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
  98. struct cxl_afu *afu, int *_record)
  99. {
  100. int record;
  101. record = cxl_pcie_cfg_record(bus->number, devfn);
  102. if (record > afu->crs_num)
  103. return PCIBIOS_DEVICE_NOT_FOUND;
  104. *_record = record;
  105. return 0;
  106. }
  107. static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
  108. int offset, int len, u32 *val)
  109. {
  110. int rc, record;
  111. struct cxl_afu *afu;
  112. u8 val8;
  113. u16 val16;
  114. u32 val32;
  115. afu = pci_bus_to_afu(bus);
  116. /* Grab a reader lock on afu. */
  117. if (afu == NULL || !cxl_afu_configured_get(afu))
  118. return PCIBIOS_DEVICE_NOT_FOUND;
  119. rc = cxl_pcie_config_info(bus, devfn, afu, &record);
  120. if (rc)
  121. goto out;
  122. switch (len) {
  123. case 1:
  124. rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
  125. *val = val8;
  126. break;
  127. case 2:
  128. rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
  129. *val = val16;
  130. break;
  131. case 4:
  132. rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
  133. *val = val32;
  134. break;
  135. default:
  136. WARN_ON(1);
  137. }
  138. out:
  139. cxl_afu_configured_put(afu);
  140. return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
  141. }
  142. static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
  143. int offset, int len, u32 val)
  144. {
  145. int rc, record;
  146. struct cxl_afu *afu;
  147. afu = pci_bus_to_afu(bus);
  148. /* Grab a reader lock on afu. */
  149. if (afu == NULL || !cxl_afu_configured_get(afu))
  150. return PCIBIOS_DEVICE_NOT_FOUND;
  151. rc = cxl_pcie_config_info(bus, devfn, afu, &record);
  152. if (rc)
  153. goto out;
  154. switch (len) {
  155. case 1:
  156. rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
  157. break;
  158. case 2:
  159. rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
  160. break;
  161. case 4:
  162. rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
  163. break;
  164. default:
  165. WARN_ON(1);
  166. }
  167. out:
  168. cxl_afu_configured_put(afu);
  169. return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
  170. }
/* Config-space accessors for devices behind a cxl vPHB.  Also used by
 * cxl_pci_is_vphb_device() as the identity marker for a vPHB. */
static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};
/* Controller callbacks installed on every vPHB created by cxl_pci_vphb_add().
 * Note disable_device doubles as release_device. */
static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
	.dma_set_mask = cxl_dma_set_mask,
};
  188. int cxl_pci_vphb_add(struct cxl_afu *afu)
  189. {
  190. struct pci_controller *phb;
  191. struct device_node *vphb_dn;
  192. struct device *parent;
  193. /*
  194. * If there are no AFU configuration records we won't have anything to
  195. * expose under the vPHB, so skip creating one, returning success since
  196. * this is still a valid case. This will also opt us out of EEH
  197. * handling since we won't have anything special to do if there are no
  198. * kernel drivers attached to the vPHB, and EEH handling is not yet
  199. * supported in the peer model.
  200. */
  201. if (!afu->crs_num)
  202. return 0;
  203. /* The parent device is the adapter. Reuse the device node of
  204. * the adapter.
  205. * We don't seem to care what device node is used for the vPHB,
  206. * but tools such as lsvpd walk up the device parents looking
  207. * for a valid location code, so we might as well show devices
  208. * attached to the adapter as being located on that adapter.
  209. */
  210. parent = afu->adapter->dev.parent;
  211. vphb_dn = parent->of_node;
  212. /* Alloc and setup PHB data structure */
  213. phb = pcibios_alloc_controller(vphb_dn);
  214. if (!phb)
  215. return -ENODEV;
  216. /* Setup parent in sysfs */
  217. phb->parent = parent;
  218. /* Setup the PHB using arch provided callback */
  219. phb->ops = &cxl_pcie_pci_ops;
  220. phb->cfg_addr = NULL;
  221. phb->cfg_data = NULL;
  222. phb->private_data = afu;
  223. phb->controller_ops = cxl_pci_controller_ops;
  224. /* Scan the bus */
  225. pcibios_scan_phb(phb);
  226. if (phb->bus == NULL)
  227. return -ENXIO;
  228. /* Set release hook on root bus */
  229. pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
  230. pcibios_free_controller_deferred,
  231. (void *) phb);
  232. /* Claim resources. This might need some rework as well depending
  233. * whether we are doing probe-only or not, like assigning unassigned
  234. * resources etc...
  235. */
  236. pcibios_claim_one_bus(phb->bus);
  237. /* Add probed PCI devices to the device model */
  238. pci_bus_add_devices(phb->bus);
  239. afu->phb = phb;
  240. return 0;
  241. }
/* Tear down the vPHB created by cxl_pci_vphb_add(), if any. */
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
	struct pci_controller *phb;

	/* If there is no configuration record we won't have one of these */
	if (!afu || !afu->phb)
		return;

	/* Clear afu->phb before removing the bus so nobody finds a
	 * half-torn-down PHB through the AFU. */
	phb = afu->phb;
	afu->phb = NULL;

	pci_remove_root_bus(phb->bus);
	/*
	 * We don't free phb here - that's handled by
	 * pcibios_free_controller_deferred(), the release hook installed on
	 * the root bus at creation time.
	 */
}
  256. bool cxl_pci_is_vphb_device(struct pci_dev *dev)
  257. {
  258. struct pci_controller *phb;
  259. phb = pci_bus_to_host(dev->bus);
  260. return (phb->ops == &cxl_pcie_pci_ops);
  261. }
  262. struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
  263. {
  264. struct pci_controller *phb;
  265. phb = pci_bus_to_host(dev->bus);
  266. return (struct cxl_afu *)phb->private_data;
  267. }
  268. EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
/* Return the AFU configuration record index backing this pci_dev. */
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);