  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2016-2019 Intel Corporation. All rights reserved. */
  3. #include <linux/memremap.h>
  4. #include <linux/pagemap.h>
  5. #include <linux/memory.h>
  6. #include <linux/module.h>
  7. #include <linux/device.h>
  8. #include <linux/pfn_t.h>
  9. #include <linux/slab.h>
  10. #include <linux/dax.h>
  11. #include <linux/fs.h>
  12. #include <linux/mm.h>
  13. #include <linux/mman.h>
  14. #include <linux/memory-tiers.h>
  15. #include <linux/memory_hotplug.h>
  16. #include "dax-private.h"
  17. #include "bus.h"
  18. /*
  19. * Default abstract distance assigned to the NUMA node onlined
  20. * by DAX/kmem if the low level platform driver didn't initialize
  21. * one for this NUMA node.
  22. */
  23. #define MEMTIER_DEFAULT_DAX_ADISTANCE (MEMTIER_ADISTANCE_DRAM * 5)
  24. /* Memory resource name used for add_memory_driver_managed(). */
  25. static const char *kmem_name;
  26. /* Set if any memory will remain added when the driver will be unloaded. */
  27. static bool any_hotremove_failed;
  28. static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
  29. {
  30. struct dev_dax_range *dax_range = &dev_dax->ranges[i];
  31. struct range *range = &dax_range->range;
  32. /* memory-block align the hotplug range */
  33. r->start = ALIGN(range->start, memory_block_size_bytes());
  34. r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
  35. if (r->start >= r->end) {
  36. r->start = range->start;
  37. r->end = range->end;
  38. return -ENOSPC;
  39. }
  40. return 0;
  41. }
/* Per-device driver data, allocated in dev_dax_kmem_probe(). */
struct dax_kmem_data {
	const char *res_name;	/* copy of the device name, passed to request_mem_region() */
	int mgid;		/* memory group id from memory_group_register_static() */
	struct resource *res[];	/* one reserved region per dev_dax range (NULL if unmapped) */
};

/* Protects the shared list of memory_dev_types created for kmem nodes. */
static DEFINE_MUTEX(kmem_memory_type_lock);
static LIST_HEAD(kmem_memory_types);
  49. static struct memory_dev_type *kmem_find_alloc_memory_type(int adist)
  50. {
  51. guard(mutex)(&kmem_memory_type_lock);
  52. return mt_find_alloc_memory_type(adist, &kmem_memory_types);
  53. }
  54. static void kmem_put_memory_types(void)
  55. {
  56. guard(mutex)(&kmem_memory_type_lock);
  57. mt_put_memory_types(&kmem_memory_types);
  58. }
/*
 * Hotplug a dev_dax instance's ranges as driver-managed System RAM.
 *
 * Rejects regions without a valid NUMA node, associates the node with a
 * memory tier, then for each memory-block-aligned range reserves the
 * region and adds it via add_memory_driver_managed().  Returns 0 when
 * at least one range was added; a negative errno otherwise.
 */
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	unsigned long total_len = 0;
	struct dax_kmem_data *data;
	struct memory_dev_type *mtype;
	int i, rc, mapped = 0;
	mhp_t mhp_flags;
	int numa_node;
	int adist = MEMTIER_DEFAULT_DAX_ADISTANCE;

	/*
	 * Ensure good NUMA information for the persistent memory.
	 * Without this check, there is a risk that slow memory
	 * could be mixed in a node with faster memory, causing
	 * unavoidable performance issues.
	 */
	numa_node = dev_dax->target_node;
	if (numa_node < 0) {
		dev_warn(dev, "rejecting DAX region with invalid node: %d\n",
				numa_node);
		return -EINVAL;
	}

	/*
	 * Let the platform refine the abstract distance (falls back to
	 * MEMTIER_DEFAULT_DAX_ADISTANCE) and get a matching memory type.
	 */
	mt_calc_adistance(numa_node, &adist);
	mtype = kmem_find_alloc_memory_type(adist);
	if (IS_ERR(mtype))
		return PTR_ERR(mtype);

	/* First pass: total up the memory that survives block alignment. */
	for (i = 0; i < dev_dax->nr_range; i++) {
		struct range range;

		rc = dax_kmem_range(dev_dax, i, &range);
		if (rc) {
			dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
					i, range.start, range.end);
			continue;
		}
		total_len += range_len(&range);
	}

	if (!total_len) {
		dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
		return -EINVAL;
	}

	init_node_memory_type(numa_node, mtype);

	rc = -ENOMEM;
	data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
	if (!data)
		goto err_dax_kmem_data;

	data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!data->res_name)
		goto err_res_name;

	rc = memory_group_register_static(numa_node, PFN_UP(total_len));
	if (rc < 0)
		goto err_reg_mgid;
	data->mgid = rc;

	/* Second pass: reserve and hotplug each usable range. */
	for (i = 0; i < dev_dax->nr_range; i++) {
		struct resource *res;
		struct range range;

		rc = dax_kmem_range(dev_dax, i, &range);
		if (rc)
			continue;

		/* Region is permanently reserved if hotremove fails. */
		res = request_mem_region(range.start, range_len(&range), data->res_name);
		if (!res) {
			dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve region\n",
					i, range.start, range.end);
			/*
			 * Once some memory has been onlined we can't
			 * assume that it can be un-onlined safely.
			 */
			if (mapped)
				continue;
			rc = -EBUSY;
			goto err_request_mem;
		}
		data->res[i] = res;

		/*
		 * Set flags appropriate for System RAM. Leave ..._BUSY clear
		 * so that add_memory() can add a child resource. Do not
		 * inherit flags from the parent since it may set new flags
		 * unknown to us that will break add_memory() below.
		 */
		res->flags = IORESOURCE_SYSTEM_RAM;

		mhp_flags = MHP_NID_IS_MGID;
		if (dev_dax->memmap_on_memory)
			mhp_flags |= MHP_MEMMAP_ON_MEMORY;

		/*
		 * Ensure that future kexec'd kernels will not treat
		 * this as RAM automatically.
		 */
		rc = add_memory_driver_managed(data->mgid, range.start,
				range_len(&range), kmem_name, mhp_flags);

		if (rc) {
			dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
					i, range.start, range.end);
			/* Undo the reservation for this range only. */
			remove_resource(res);
			kfree(res);
			data->res[i] = NULL;
			if (mapped)
				continue;
			goto err_request_mem;
		}
		mapped++;
	}

	dev_set_drvdata(dev, data);

	return 0;

err_request_mem:
	memory_group_unregister(data->mgid);
err_reg_mgid:
	kfree(data->res_name);
err_res_name:
	kfree(data);
err_dax_kmem_data:
	clear_node_memory_type(numa_node, mtype);
	return rc;
}
  172. #ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Tear down memory added in dev_dax_kmem_probe().  Ranges whose memory
 * blocks are still online cannot be removed; they remain pinned until
 * reboot and any_hotremove_failed is set so kmem_name is never freed.
 */
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
	int i, success = 0;
	int node = dev_dax->target_node;
	struct device *dev = &dev_dax->dev;
	struct dax_kmem_data *data = dev_get_drvdata(dev);

	/*
	 * We have one shot for removing memory, if some memory blocks were not
	 * offline prior to calling this function remove_memory() will fail, and
	 * there is no way to hotremove this memory until reboot because device
	 * unbind will succeed even if we return failure.
	 */
	for (i = 0; i < dev_dax->nr_range; i++) {
		struct range range;
		int rc;

		rc = dax_kmem_range(dev_dax, i, &range);
		if (rc)
			continue;

		rc = remove_memory(range.start, range_len(&range));
		if (rc == 0) {
			/* Release the reservation taken at probe time. */
			remove_resource(data->res[i]);
			kfree(data->res[i]);
			data->res[i] = NULL;
			success++;
			continue;
		}
		any_hotremove_failed = true;
		dev_err(dev,
			"mapping%d: %#llx-%#llx cannot be hotremoved until the next reboot\n",
				i, range.start, range.end);
	}

	/* Free driver data only when every range was removed. */
	if (success >= dev_dax->nr_range) {
		memory_group_unregister(data->mgid);
		kfree(data->res_name);
		kfree(data);
		dev_set_drvdata(dev, NULL);
		/*
		 * Clear the memtype association on successful unplug.
		 * If not, we have memory blocks left which can be
		 * offlined/onlined later. We need to keep memory_dev_type
		 * for that. This implies this reference will be around
		 * till next reboot.
		 */
		clear_node_memory_type(node, NULL);
	}
}
  219. #else
/* Stub used when the kernel cannot hotremove memory at all. */
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
	/*
	 * Without hotremove purposely leak the request_mem_region() for the
	 * device-dax range and return '0' to ->remove() attempts. The removal
	 * of the device from the driver always succeeds, but the region is
	 * permanently pinned as reserved by the unreleased
	 * request_mem_region().
	 */
	any_hotremove_failed = true;
}
  231. #endif /* CONFIG_MEMORY_HOTREMOVE */
/* dax bus driver that binds dev_dax devices of the kmem type. */
static struct dax_device_driver device_dax_kmem_driver = {
	.probe = dev_dax_kmem_probe,
	.remove = dev_dax_kmem_remove,
	.type = DAXDRV_KMEM_TYPE,
};
  237. static int __init dax_kmem_init(void)
  238. {
  239. int rc;
  240. /* Resource name is permanently allocated if any hotremove fails. */
  241. kmem_name = kstrdup_const("System RAM (kmem)", GFP_KERNEL);
  242. if (!kmem_name)
  243. return -ENOMEM;
  244. rc = dax_driver_register(&device_dax_kmem_driver);
  245. if (rc)
  246. goto error_dax_driver;
  247. return rc;
  248. error_dax_driver:
  249. kmem_put_memory_types();
  250. kfree_const(kmem_name);
  251. return rc;
  252. }
/* Module exit: unregister the driver and release init-time state. */
static void __exit dax_kmem_exit(void)
{
	dax_driver_unregister(&device_dax_kmem_driver);
	/* kmem_name may still back reserved regions; see any_hotremove_failed. */
	if (!any_hotremove_failed)
		kfree_const(kmem_name);
	kmem_put_memory_types();
}
/* Module metadata and dax-device alias for automatic loading. */
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("KMEM DAX: map dax-devices as System-RAM");
MODULE_LICENSE("GPL v2");
module_init(dax_kmem_init);
module_exit(dax_kmem_exit);
MODULE_ALIAS_DAX_DEVICE(0);