// SPDX-License-Identifier: GPL-2.0
/*
 * ACPI 6.6 based NUMA setup for RISCV
 * Lots of code was borrowed from arch/arm64/kernel/acpi_numa.c
 *
 * Copyright 2004 Andi Kleen, SuSE Labs.
 * Copyright (C) 2013-2016, Linaro Ltd.
 * Author: Hanjun Guo <hanjun.guo@linaro.org>
 * Copyright (C) 2024 Intel Corporation.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */
#define pr_fmt(fmt) "ACPI: NUMA: " fmt

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/topology.h>

#include <asm/numa.h>
/*
 * Early logical-CPU -> node ID map, filled in from SRAT RINTC affinity
 * entries by acpi_parse_rintc_pxm(). Entries default to NUMA_NO_NODE
 * until an SRAT entry provides a mapping.
 */
static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

/* Return the node ID recorded for @cpu (NUMA_NO_NODE if none was found). */
static int __init acpi_numa_get_nid(unsigned int cpu)
{
	return acpi_early_node_map[cpu];
}
  32. static inline int get_cpu_for_acpi_id(u32 uid)
  33. {
  34. int cpu;
  35. for (cpu = 0; cpu < nr_cpu_ids; cpu++)
  36. if (uid == get_acpi_id_for_cpu(cpu))
  37. return cpu;
  38. return -EINVAL;
  39. }
  40. static int __init acpi_parse_rintc_pxm(union acpi_subtable_headers *header,
  41. const unsigned long end)
  42. {
  43. struct acpi_srat_rintc_affinity *pa;
  44. int cpu, pxm, node;
  45. if (srat_disabled())
  46. return -EINVAL;
  47. pa = (struct acpi_srat_rintc_affinity *)header;
  48. if (!pa)
  49. return -EINVAL;
  50. if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED))
  51. return 0;
  52. pxm = pa->proximity_domain;
  53. node = pxm_to_node(pxm);
  54. /*
  55. * If we can't map the UID to a logical cpu this
  56. * means that the UID is not part of possible cpus
  57. * so we do not need a NUMA mapping for it, skip
  58. * the SRAT entry and keep parsing.
  59. */
  60. cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
  61. if (cpu < 0)
  62. return 0;
  63. acpi_early_node_map[cpu] = node;
  64. pr_info("SRAT: PXM %d -> HARTID 0x%lx -> Node %d\n", pxm,
  65. cpuid_to_hartid_map(cpu), node);
  66. return 0;
  67. }
  68. void __init acpi_map_cpus_to_nodes(void)
  69. {
  70. int i;
  71. /*
  72. * In ACPI, SMP and CPU NUMA information is provided in separate
  73. * static tables, namely the MADT and the SRAT.
  74. *
  75. * Thus, it is simpler to first create the cpu logical map through
  76. * an MADT walk and then map the logical cpus to their node ids
  77. * as separate steps.
  78. */
  79. acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat),
  80. ACPI_SRAT_TYPE_RINTC_AFFINITY, acpi_parse_rintc_pxm, 0);
  81. for (i = 0; i < nr_cpu_ids; i++)
  82. early_map_cpu_to_node(i, acpi_numa_get_nid(i));
  83. }
  84. /* Callback for Proximity Domain -> logical node ID mapping */
  85. void __init acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa)
  86. {
  87. int pxm, node;
  88. if (srat_disabled())
  89. return;
  90. if (pa->header.length < sizeof(struct acpi_srat_rintc_affinity)) {
  91. pr_err("SRAT: Invalid SRAT header length: %d\n", pa->header.length);
  92. bad_srat();
  93. return;
  94. }
  95. if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED))
  96. return;
  97. pxm = pa->proximity_domain;
  98. node = acpi_map_pxm_to_node(pxm);
  99. if (node == NUMA_NO_NODE) {
  100. pr_err("SRAT: Too many proximity domains %d\n", pxm);
  101. bad_srat();
  102. return;
  103. }
  104. node_set(node, numa_nodes_parsed);
  105. }