cpuhotplug.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips,
	 * which do not implement effective affinity, but the architecture has
	 * enabled the config switch. Use the general affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}
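
/*
 * Illustrative example (annotation, not part of the original source): if
 * CPU 2 is going offline and the (effective) affinity mask of an interrupt
 * is 0-3, irq_needs_fixup() returns true because the outgoing CPU is a
 * target. With a mask of 0-1 it returns false and migrate_one_irq() below
 * leaves the interrupt alone.
 */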
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. It can't wait until this interrupt actually
	 * happens and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}
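
/*
 * Return value summary (annotation, not part of the original source):
 * migrate_one_irq() returns true only when the affinity had to be broken,
 * i.e. the requested mask contained no online CPU and the interrupt was
 * forced onto cpu_online_mask. All other paths - nothing to do, no affinity
 * setter available, or a managed interrupt that was shut down instead -
 * return false, so the caller below only warns when the user visible
 * affinity actually changed.
 */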
/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}
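
/*
 * Typical call site (a sketch, modelled on the arch/arm CPU offline path
 * this code was copied from): an architecture invokes
 * irq_migrate_all_off_this_cpu() from its __cpu_disable() hook, after the
 * dying CPU has been cleared from the online mask and with interrupts
 * disabled on that CPU, roughly:
 *
 *	int __cpu_disable(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		set_cpu_online(cpu, false);
 *		...
 *		irq_migrate_all_off_this_cpu();
 *		...
 *		return 0;
 *	}
 */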
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}
/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
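
/*
 * Usage sketch (annotation, not part of this file): the core CPU hotplug
 * code registers irq_affinity_online_cpu() as the startup callback of the
 * CPUHP_AP_IRQ_AFFINITY_ONLINE state in kernel/cpu.c, roughly:
 *
 *	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
 *		.name			= "irq/affinity:online",
 *		.startup.single		= irq_affinity_online_cpu,
 *		.teardown.single	= NULL,
 *	},
 *
 * so the affinity of managed interrupts is restored each time a CPU comes
 * back online.
 */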