device.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/dax.h>
#include <linux/mm.h>
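
/* "disable" module parameter: opt out of soft-reserved range registration. */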
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);

static bool platform_initialized;
static DEFINE_MUTEX(hmem_resource_lock);
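
/*
 * Root of the resource tree tracking every soft-reserved range handed to
 * hmem_register_resource(); each child resource caches its target node id
 * in ->desc (see __hmem_register_resource()).
 */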
static struct resource hmem_active = {
	.name = "HMEM devices",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};
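
/*
 * Invoke @fn for each registered range, passing along the target node id
 * cached in ->desc. The walk stops at the first non-zero return value.
 */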
int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
{
	struct resource *res;
	int rc = 0;

	mutex_lock(&hmem_resource_lock);
	for (res = hmem_active.child; res; res = res->sibling) {
		rc = fn(host, (int) res->desc, res);
		if (rc)
			break;
	}
	mutex_unlock(&hmem_resource_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(walk_hmem_resources);
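
/*
 * Sketch of a consumer (not part of this file): assuming the walk_hmem_fn
 * typedef in <linux/dax.h> takes (struct device *, int, const struct
 * resource *), a driver hosting the walk could do something like:
 *
 *	static int hmem_visit_one(struct device *host, int target_nid,
 *				  const struct resource *res)
 *	{
 *		dev_dbg(host, "hmem range %pr on node %d\n", res, target_nid);
 *		return 0;
 *	}
 *
 *	...
 *	rc = walk_hmem_resources(host, hmem_visit_one);
 *
 * The callback name above is hypothetical; the device-dax hmem driver is the
 * intended in-tree consumer.
 */

/*
 * Claim @res under hmem_active, cache @target_nid in the new child resource,
 * and register the "hmem_platform" device the first time a range arrives.
 */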
static void __hmem_register_resource(int target_nid, struct resource *res)
{
	struct platform_device *pdev;
	struct resource *new;
	int rc;

	new = __request_region(&hmem_active, res->start, resource_size(res), "",
			       0);
	if (!new) {
		pr_debug("hmem range %pr already active\n", res);
		return;
	}

	new->desc = target_nid;

	if (platform_initialized)
		return;

	pdev = platform_device_alloc("hmem_platform", 0);
	if (!pdev) {
		pr_err_once("failed to register device-dax hmem_platform device\n");
		return;
	}
	rc = platform_device_add(pdev);
	if (rc)
		platform_device_put(pdev);
	else
		platform_initialized = true;
}
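
/* External entry point: record a soft-reserved range unless "disable" was set. */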
void hmem_register_resource(int target_nid, struct resource *res)
{
	if (nohmem)
		return;

	mutex_lock(&hmem_resource_lock);
	__hmem_register_resource(target_nid, res);
	mutex_unlock(&hmem_resource_lock);
}
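
/* Callback for walk_iomem_res_desc(): resolve the target node and register. */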
static __init int hmem_register_one(struct resource *res, void *data)
{
	hmem_register_resource(phys_to_target_node(res->start), res);

	return 0;
}
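
/* At boot, hand every IORES_DESC_SOFT_RESERVED range to the registration path. */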
static __init int hmem_init(void)
{
	walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
			IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);

	return 0;
}

/*
 * As this is a fallback for address ranges unclaimed by the ACPI HMAT
 * parsing, it must be at an initcall level greater than hmat_init().
 */
device_initcall(hmem_init);