// ipi.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>
void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field.
	 */
	cfg = __prepare_ICR(shortcut, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}
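
/*
 * Illustrative sketch (not part of the original file): a broadcast to all
 * CPUs, including the sender, via the ALLINC shorthand. The function name
 * is hypothetical; callers must serialize access to the ICR (e.g. run with
 * interrupts disabled), just as the real callers of the helper above do.
 */
static void __maybe_unused example_send_IPI_allinc(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector,
				    apic->dest_logical);
}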

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * Prepare target chip field.
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * Program the ICR.
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}
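
/*
 * Worked example (added for illustration, not in the original file): in
 * xAPIC physical mode, sending vector 0xfd to the CPU with APIC ID 3 ends
 * up as two MMIO writes: __prepare_ICR2(3) places the ID in bits 24-31 of
 * ICR2 (0x03000000, i.e. bits 56-63 of the full 64-bit ICR), and
 * __prepare_ICR(0, 0xfd, APIC_DEST_PHYSICAL) yields 0x000000fd (fixed
 * delivery mode) for the low ICR write that actually triggers the send.
 */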

void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
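
/*
 * Usage sketch (hypothetical, not part of the original file): deliver a
 * vector to an arbitrary pair of CPUs by building a cpumask and letting the
 * sequence helper above unicast to each one. Function and parameter names
 * are placeholders for illustration.
 */
static void __maybe_unused example_send_IPI_cpu_pair(int cpu_a, int cpu_b,
						     int vector)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_set_cpu(cpu_a, mask);
	cpumask_set_cpu(cpu_b, mask);
	default_send_IPI_mask_sequence_phys(mask, vector);
	free_cpumask_var(mask);
}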

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
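
/*
 * Usage sketch (hypothetical): notify every online CPU except the sender.
 * This is the typical pattern for cross-CPU kicks when shorthands cannot
 * be used; the helper above skips smp_processor_id() internally.
 */
static void __maybe_unused example_kick_other_cpus(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}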

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}
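
/*
 * Worked example (added for illustration): in flat logical mode each CPU
 * owns one bit of an 8-bit logical ID, so a cpumask with CPUs 0 and 2 set
 * becomes the destination mask 0x05, written into bits 24-31 of ICR2 by
 * __default_send_IPI_dest_field(). This is why the flat logical helpers
 * above are "only used on smaller machines": at most eight CPUs can be
 * addressed this way.
 */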

void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (num_online_cpus() <= 1)
		return;

	__default_local_send_IPI_allbutself(vector);
}

void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif