// SPDX-License-Identifier: GPL-2.0
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/dmar.h>
#include <linux/irq.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include "x2apic.h"
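
/*
 * Per-cluster bookkeeping: the cluster ID (the upper 16 bits of the
 * logical APIC ID), the node the mask was allocated on, and the mask of
 * online CPUs that belong to the cluster.
 */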
struct cluster_mask {
	unsigned int clusterid;
	int node;
	struct cpumask mask;
};
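
/*
 * Per-CPU state: the cached logical APIC ID read from APIC_LDR, a scratch
 * cpumask for assembling IPI destinations, and the CPU's cluster_mask.
 * cluster_hotplug_mask holds one preallocated spare for the next CPU that
 * brings up a previously empty cluster.
 */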
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}
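
/* Send an IPI to a single CPU, addressed by its cached logical APIC ID. */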
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
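
/*
 * A cluster-mode logical destination is "cluster ID in the upper 16 bits,
 * CPU bit in the lower 16 bits", so the logical IDs of CPUs in the same
 * cluster can be ORed together and targeted with a single ICR write.
 */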
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	local_irq_save(flags);

	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If IPI should not be sent to self, clear current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
			dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/* Remove cluster CPUs from tmpmask */
		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu);
}
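
/*
 * Runs on the starting CPU: cache the logical APIC ID and link the CPU
 * into its cluster's mask. A new cluster consumes the preallocated spare,
 * as this early bringup path cannot allocate memory itself.
 */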
static void init_x2apic_ldr(void)
{
	struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
	u32 cluster, apicid = apic_read(APIC_LDR);
	unsigned int cpu;

	this_cpu_write(x86_cpu_to_logical_apicid, apicid);

	if (cmsk)
		goto update;

	cluster = apicid >> 16;
	for_each_online_cpu(cpu) {
		cmsk = per_cpu(cluster_masks, cpu);
		/* Matching cluster found. Link and update it. */
		if (cmsk && cmsk->clusterid == cluster)
			goto update;
	}
	cmsk = cluster_hotplug_mask;
	cmsk->clusterid = cluster;
	cluster_hotplug_mask = NULL;
update:
	this_cpu_write(cluster_masks, cmsk);
	cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}
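
/*
 * Keep a spare cluster_mask preallocated on the upcoming CPU's node so
 * init_x2apic_ldr() never needs to allocate. An existing spare is only
 * replaced when it lives on the wrong node.
 */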
static int alloc_clustermask(unsigned int cpu, int node)
{
	if (per_cpu(cluster_masks, cpu))
		return 0;
	/*
	 * If a hotplug spare mask exists, check whether it's on the right
	 * node. If not, free it and allocate a new one.
	 */
	if (cluster_hotplug_mask) {
		if (cluster_hotplug_mask->node == node)
			return 0;
		kfree(cluster_hotplug_mask);
	}

	cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
					    GFP_KERNEL, node);
	if (!cluster_hotplug_mask)
		return -ENOMEM;
	cluster_hotplug_mask->node = node;
	return 0;
}
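
/*
 * CPUHP_X2APIC_PREPARE callback: allocate the cluster spare and the
 * per-CPU scratch mask before the new CPU starts.
 */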
static int x2apic_prepare_cpu(unsigned int cpu)
{
	if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
		return -ENOMEM;
	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
		return -ENOMEM;
	return 0;
}
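
/*
 * Hotplug teardown: unlink the dead CPU from its cluster and free its
 * scratch mask. The cluster_mask itself is kept for reuse.
 */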
static int x2apic_dead_cpu(unsigned int dead_cpu)
{
	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);

	if (cmsk)
		cpumask_clear_cpu(dead_cpu, &cmsk->mask);
	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
	return 0;
}
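
/*
 * Probe: register the hotplug callbacks and set up the boot CPU's logical
 * APIC ID mapping. Returning 1 selects this driver.
 */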
static int x2apic_cluster_probe(void)
{
	if (!x2apic_mode)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		return 0;
	}
	init_x2apic_ldr();
	return 1;
}
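
/* Operations table for the cluster-mode x2APIC driver. */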
static struct apic apic_x2apic_cluster __ro_after_init = {

	.name = "cluster x2apic",
	.probe = x2apic_cluster_probe,
	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
	.apic_id_valid = x2apic_apic_id_valid,
	.apic_id_registered = x2apic_apic_id_registered,

	.irq_delivery_mode = dest_Fixed,
	.irq_dest_mode = 1, /* logical */

	.disable_esr = 0,
	.dest_logical = APIC_DEST_LOGICAL,
	.check_apicid_used = NULL,

	.init_apic_ldr = init_x2apic_ldr,

	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.check_phys_apicid_present = default_check_phys_apicid_present,
	.phys_pkg_id = x2apic_phys_pkg_id,

	.get_apic_id = x2apic_get_apic_id,
	.set_apic_id = x2apic_set_apic_id,

	.calc_dest_apicid = x2apic_calc_apicid,

	.send_IPI = x2apic_send_IPI,
	.send_IPI_mask = x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
	.send_IPI_all = x2apic_send_IPI_all,
	.send_IPI_self = x2apic_send_IPI_self,

	.inquire_remote_apic = NULL,

	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.eoi_write = native_apic_msr_eoi_write,
	.icr_read = native_x2apic_icr_read,
	.icr_write = native_x2apic_icr_write,
	.wait_icr_idle = native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);