/* drivers/nvdimm/region.c — nd_region bus driver (probe/remove/notify) */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  4. */
  5. #include <linux/memregion.h>
  6. #include <linux/cpumask.h>
  7. #include <linux/module.h>
  8. #include <linux/device.h>
  9. #include <linux/nd.h>
  10. #include "nd-core.h"
  11. #include "nd.h"
/*
 * Driver probe for an nd_region device: activate the region mappings,
 * set up the region-wide badblocks list, then asynchronously register
 * the child namespace devices plus the btt/pfn/dax seed devices.
 *
 * Returns 0 on success (even if some, but not all, namespaces failed to
 * register — see the comment near the end), or a negative errno.
 */
static int nd_region_probe(struct device *dev)
{
	int err, rc;
	/* one-shot latch so the lanes-vs-cpus tuning hint is logged only once */
	static unsigned long once;
	struct nd_region_data *ndrd;
	struct nd_region *nd_region = to_nd_region(dev);
	struct range range = {
		.start = nd_region->ndr_start,
		.end = nd_region->ndr_start + nd_region->ndr_size - 1,
	};

	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

	rc = nd_region_activate(nd_region);
	if (rc)
		return rc;

	/* badblocks storage is devm-managed; freed automatically on unbind */
	if (devm_init_badblocks(dev, &nd_region->bb))
		return -ENODEV;
	/*
	 * Take a reference on the 'badblocks' sysfs dirent so poison
	 * revalidation can poke readers; released in nd_region_remove().
	 */
	nd_region->bb_state =
		sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks");
	if (!nd_region->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);

	/* rc: namespaces registered (async); err: namespaces that failed */
	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;

	ndrd = dev_get_drvdata(dev);
	ndrd->ns_active = rc;
	ndrd->ns_count = rc + err;

	/*
	 * NOTE(review): bail with -ENODEV when the registered and failed
	 * counts are equal and nonzero — presumably treating that split as
	 * an unusable region; confirm against nd_region_register_namespaces()
	 */
	if (rc && err && rc == err)
		return -ENODEV;

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure. If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}
  67. static int child_unregister(struct device *dev, void *data)
  68. {
  69. nd_device_unregister(dev, ND_SYNC);
  70. return 0;
  71. }
/*
 * Driver remove for an nd_region device: unregister all children, clear
 * the seed-device pointers and drvdata under the bus lock, drop the
 * 'badblocks' sysfs reference, and flush CPU caches for the region.
 */
static void nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/* children go first, while region state is still intact */
	device_for_each_child(dev, NULL, child_unregister);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	/*
	 * Note, this assumes device_lock() context to not race
	 * nd_region_notify()
	 */
	sysfs_put(nd_region->bb_state);
	nd_region->bb_state = NULL;

	/*
	 * Try to flush caches here since a disabled region may be subject to
	 * secure erase while disabled, and previous dirty data should not be
	 * written back to a new instance of the region. This only matters on
	 * bare metal where security commands are available, so silent failure
	 * here is ok.
	 */
	if (cpu_cache_has_invalidate_memregion())
		cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}
  100. static int child_notify(struct device *dev, void *data)
  101. {
  102. nd_device_notify(dev, *(enum nvdimm_event *) data);
  103. return 0;
  104. }
  105. static void nd_region_notify(struct device *dev, enum nvdimm_event event)
  106. {
  107. if (event == NVDIMM_REVALIDATE_POISON) {
  108. struct nd_region *nd_region = to_nd_region(dev);
  109. if (is_memory(&nd_region->dev)) {
  110. struct range range = {
  111. .start = nd_region->ndr_start,
  112. .end = nd_region->ndr_start +
  113. nd_region->ndr_size - 1,
  114. };
  115. nvdimm_badblocks_populate(nd_region,
  116. &nd_region->bb, &range);
  117. if (nd_region->bb_state)
  118. sysfs_notify_dirent(nd_region->bb_state);
  119. }
  120. }
  121. device_for_each_child(dev, &event, child_notify);
  122. }
/* nd bus driver descriptor: binds to both PMEM and BLK region devices */
static struct nd_device_driver nd_region_driver = {
	.probe = nd_region_probe,
	.remove = nd_region_remove,
	.notify = nd_region_notify,
	.drv = {
		.name = "nd_region",
	},
	.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};
  132. int __init nd_region_init(void)
  133. {
  134. return nd_driver_register(&nd_region_driver);
  135. }
  136. void nd_region_exit(void)
  137. {
  138. driver_unregister(&nd_region_driver.drv);
  139. }
/* module alias so udev/modprobe autoloads this driver for PMEM regions */
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);