// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "cxlmem.h"
#include "cxlpci.h"
/**
 * DOC: cxl mem
 *
 * CXL memory endpoint devices and switches are CXL capable devices that are
 * participating in CXL.mem protocol. Their functionality builds on top of the
 * CXL.io protocol that allows enumerating and configuring components via
 * standard PCI mechanisms.
 *
 * The cxl_mem driver owns kicking off the enumeration of this CXL.mem
 * capability. With the detection of a CXL capable endpoint, the driver will
 * walk up to find the platform specific port it is connected to, and determine
 * if there are intervening switches in the path. If there are switches, a
 * secondary action is to enumerate those (implemented in cxl_core). Finally the
 * cxl_mem driver adds the device it is bound to as a CXL endpoint-port for use
 * in higher level operations.
 */
  25. static void enable_suspend(void *data)
  26. {
  27. cxl_mem_active_dec();
  28. }
  29. static void remove_debugfs(void *dentry)
  30. {
  31. debugfs_remove_recursive(dentry);
  32. }
  33. static int cxl_mem_dpa_show(struct seq_file *file, void *data)
  34. {
  35. struct device *dev = file->private;
  36. struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  37. cxl_dpa_debug(file, cxlmd->cxlds);
  38. return 0;
  39. }
  40. static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
  41. struct cxl_dport *parent_dport)
  42. {
  43. struct cxl_port *parent_port = parent_dport->port;
  44. struct cxl_port *endpoint, *iter, *down;
  45. int rc;
  46. /*
  47. * Now that the path to the root is established record all the
  48. * intervening ports in the chain.
  49. */
  50. for (iter = parent_port, down = NULL; !is_cxl_root(iter);
  51. down = iter, iter = to_cxl_port(iter->dev.parent)) {
  52. struct cxl_ep *ep;
  53. ep = cxl_ep_load(iter, cxlmd);
  54. ep->next = down;
  55. }
  56. /* Note: endpoint port component registers are derived from @cxlds */
  57. endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE,
  58. parent_dport);
  59. if (IS_ERR(endpoint))
  60. return PTR_ERR(endpoint);
  61. rc = cxl_endpoint_autoremove(cxlmd, endpoint);
  62. if (rc)
  63. return rc;
  64. if (!endpoint->dev.driver) {
  65. dev_err(&cxlmd->dev, "%s failed probe\n",
  66. dev_name(&endpoint->dev));
  67. return -ENXIO;
  68. }
  69. return 0;
  70. }
  71. static int cxl_debugfs_poison_inject(void *data, u64 dpa)
  72. {
  73. struct cxl_memdev *cxlmd = data;
  74. return cxl_inject_poison(cxlmd, dpa);
  75. }
  76. DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
  77. cxl_debugfs_poison_inject, "%llx\n");
  78. static int cxl_debugfs_poison_clear(void *data, u64 dpa)
  79. {
  80. struct cxl_memdev *cxlmd = data;
  81. return cxl_clear_poison(cxlmd, dpa);
  82. }
  83. DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
  84. cxl_debugfs_poison_clear, "%llx\n");
  85. static int cxl_mem_probe(struct device *dev)
  86. {
  87. struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  88. struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
  89. struct cxl_dev_state *cxlds = cxlmd->cxlds;
  90. struct device *endpoint_parent;
  91. struct cxl_dport *dport;
  92. struct dentry *dentry;
  93. int rc;
  94. if (!cxlds->media_ready)
  95. return -EBUSY;
  96. /*
  97. * Someone is trying to reattach this device after it lost its port
  98. * connection (an endpoint port previously registered by this memdev was
  99. * disabled). This racy check is ok because if the port is still gone,
  100. * no harm done, and if the port hierarchy comes back it will re-trigger
  101. * this probe. Port rescan and memdev detach work share the same
  102. * single-threaded workqueue.
  103. */
  104. if (work_pending(&cxlmd->detach_work))
  105. return -EBUSY;
  106. dentry = cxl_debugfs_create_dir(dev_name(dev));
  107. debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
  108. if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds))
  109. debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
  110. &cxl_poison_inject_fops);
  111. if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds))
  112. debugfs_create_file("clear_poison", 0200, dentry, cxlmd,
  113. &cxl_poison_clear_fops);
  114. rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
  115. if (rc)
  116. return rc;
  117. rc = devm_cxl_enumerate_ports(cxlmd);
  118. if (rc)
  119. return rc;
  120. struct cxl_port *parent_port __free(put_cxl_port) =
  121. cxl_mem_find_port(cxlmd, &dport);
  122. if (!parent_port) {
  123. dev_err(dev, "CXL port topology not found\n");
  124. return -ENXIO;
  125. }
  126. if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
  127. rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
  128. if (rc) {
  129. if (rc == -ENODEV)
  130. dev_info(dev, "PMEM disabled by platform\n");
  131. return rc;
  132. }
  133. }
  134. if (dport->rch)
  135. endpoint_parent = parent_port->uport_dev;
  136. else
  137. endpoint_parent = &parent_port->dev;
  138. cxl_dport_init_ras_reporting(dport, dev);
  139. scoped_guard(device, endpoint_parent) {
  140. if (!endpoint_parent->driver) {
  141. dev_err(dev, "CXL port topology %s not enabled\n",
  142. dev_name(endpoint_parent));
  143. return -ENXIO;
  144. }
  145. rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
  146. if (rc)
  147. return rc;
  148. }
  149. /*
  150. * The kernel may be operating out of CXL memory on this device,
  151. * there is no spec defined way to determine whether this device
  152. * preserves contents over suspend, and there is no simple way
  153. * to arrange for the suspend image to avoid CXL memory which
  154. * would setup a circular dependency between PCI resume and save
  155. * state restoration.
  156. *
  157. * TODO: support suspend when all the regions this device is
  158. * hosting are locked and covered by the system address map,
  159. * i.e. platform firmware owns restoring the HDM configuration
  160. * that it locked.
  161. */
  162. cxl_mem_active_inc();
  163. return devm_add_action_or_reset(dev, enable_suspend, NULL);
  164. }
  165. static ssize_t trigger_poison_list_store(struct device *dev,
  166. struct device_attribute *attr,
  167. const char *buf, size_t len)
  168. {
  169. bool trigger;
  170. int rc;
  171. if (kstrtobool(buf, &trigger) || !trigger)
  172. return -EINVAL;
  173. rc = cxl_trigger_poison_list(to_cxl_memdev(dev));
  174. return rc ? rc : len;
  175. }
  176. static DEVICE_ATTR_WO(trigger_poison_list);
  177. static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
  178. {
  179. struct device *dev = kobj_to_dev(kobj);
  180. struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  181. struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
  182. if (a == &dev_attr_trigger_poison_list.attr)
  183. if (!test_bit(CXL_POISON_ENABLED_LIST,
  184. mds->poison.enabled_cmds))
  185. return 0;
  186. return a->mode;
  187. }
  188. static struct attribute *cxl_mem_attrs[] = {
  189. &dev_attr_trigger_poison_list.attr,
  190. NULL
  191. };
  192. static struct attribute_group cxl_mem_group = {
  193. .attrs = cxl_mem_attrs,
  194. .is_visible = cxl_mem_visible,
  195. };
  196. __ATTRIBUTE_GROUPS(cxl_mem);
  197. static struct cxl_driver cxl_mem_driver = {
  198. .name = "cxl_mem",
  199. .probe = cxl_mem_probe,
  200. .id = CXL_DEVICE_MEMORY_EXPANDER,
  201. .drv = {
  202. .dev_groups = cxl_mem_groups,
  203. },
  204. };
  205. module_cxl_driver(cxl_mem_driver);
  206. MODULE_DESCRIPTION("CXL: Memory Expansion");
  207. MODULE_LICENSE("GPL v2");
  208. MODULE_IMPORT_NS(CXL);
  209. MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER);
  210. /*
  211. * create_endpoint() wants to validate port driver attach immediately after
  212. * endpoint registration.
  213. */
  214. MODULE_SOFTDEP("pre: cxl_port");