  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
  3. #include <linux/spinlock.h>
  4. #include <linux/seq_file.h>
  5. #include <linux/bitmap.h>
  6. #include <linux/percpu.h>
  7. #include <linux/cpu.h>
  8. #include <linux/irq.h>
/**
 * struct cpumap - per-CPU bookkeeping for the interrupt matrix
 * @available:		Number of bits still allocatable on this CPU
 * @allocated:		Number of bits currently allocated on this CPU
 * @managed:		Number of bits reserved for managed interrupts
 * @managed_allocated:	Number of managed bits which are also allocated
 * @initialized:	True once @available was computed on first online
 * @online:		True while this CPU's map takes part in allocation
 * @managed_map:	Bitmap of reserved managed bits; points into the
 *			second half of the @alloc_map storage (set up in
 *			irq_alloc_matrix())
 * @alloc_map:		Bitmap of allocated bits, sized at matrix creation
 */
struct cpumap {
	unsigned int		available;
	unsigned int		allocated;
	unsigned int		managed;
	unsigned int		managed_allocated;
	bool			initialized;
	bool			online;
	unsigned long		*managed_map;
	unsigned long		alloc_map[];
};
/**
 * struct irq_matrix - global state of the matrix allocator
 * @matrix_bits:	Width of the per-CPU bitmaps in bits
 * @alloc_start:	First bit considered by the allocation search
 * @alloc_end:		First invalid bit; the search ends here
 * @alloc_size:		alloc_end - alloc_start
 * @global_available:	Sum of @available over all online CPU maps
 * @global_reserved:	Number of reserved, not yet allocated, interrupts
 * @systembits_inalloc:	Number of system bits inside the allocation range
 * @total_allocated:	Total number of allocated bits over all CPU maps
 * @online_maps:	Number of CPU maps currently online
 * @maps:		Per-CPU maps
 * @system_map:		Bitmap of system-wide bits; points into the second
 *			half of the @scratch_map storage (set up in
 *			irq_alloc_matrix())
 * @scratch_map:	Scratch bitmap used by the area search
 */
struct irq_matrix {
	unsigned int		matrix_bits;
	unsigned int		alloc_start;
	unsigned int		alloc_end;
	unsigned int		alloc_size;
	unsigned int		global_available;
	unsigned int		global_reserved;
	unsigned int		systembits_inalloc;
	unsigned int		total_allocated;
	unsigned int		online_maps;
	struct cpumap __percpu	*maps;
	unsigned long		*system_map;
	unsigned long		scratch_map[];
};
  33. #define CREATE_TRACE_POINTS
  34. #include <trace/events/irq_matrix.h>
  35. /**
  36. * irq_alloc_matrix - Allocate a irq_matrix structure and initialize it
  37. * @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS
  38. * @alloc_start: From which bit the allocation search starts
  39. * @alloc_end: At which bit the allocation search ends, i.e first
  40. * invalid bit
  41. */
  42. __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
  43. unsigned int alloc_start,
  44. unsigned int alloc_end)
  45. {
  46. unsigned int cpu, matrix_size = BITS_TO_LONGS(matrix_bits);
  47. struct irq_matrix *m;
  48. m = kzalloc(struct_size(m, scratch_map, matrix_size * 2), GFP_KERNEL);
  49. if (!m)
  50. return NULL;
  51. m->system_map = &m->scratch_map[matrix_size];
  52. m->matrix_bits = matrix_bits;
  53. m->alloc_start = alloc_start;
  54. m->alloc_end = alloc_end;
  55. m->alloc_size = alloc_end - alloc_start;
  56. m->maps = __alloc_percpu(struct_size(m->maps, alloc_map, matrix_size * 2),
  57. __alignof__(*m->maps));
  58. if (!m->maps) {
  59. kfree(m);
  60. return NULL;
  61. }
  62. for_each_possible_cpu(cpu) {
  63. struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
  64. cm->managed_map = &cm->alloc_map[matrix_size];
  65. }
  66. return m;
  67. }
  68. /**
  69. * irq_matrix_online - Bring the local CPU matrix online
  70. * @m: Matrix pointer
  71. */
  72. void irq_matrix_online(struct irq_matrix *m)
  73. {
  74. struct cpumap *cm = this_cpu_ptr(m->maps);
  75. BUG_ON(cm->online);
  76. if (!cm->initialized) {
  77. cm->available = m->alloc_size;
  78. cm->available -= cm->managed + m->systembits_inalloc;
  79. cm->initialized = true;
  80. }
  81. m->global_available += cm->available;
  82. cm->online = true;
  83. m->online_maps++;
  84. trace_irq_matrix_online(m);
  85. }
  86. /**
  87. * irq_matrix_offline - Bring the local CPU matrix offline
  88. * @m: Matrix pointer
  89. */
  90. void irq_matrix_offline(struct irq_matrix *m)
  91. {
  92. struct cpumap *cm = this_cpu_ptr(m->maps);
  93. /* Update the global available size */
  94. m->global_available -= cm->available;
  95. cm->online = false;
  96. m->online_maps--;
  97. trace_irq_matrix_offline(m);
  98. }
  99. static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
  100. unsigned int num, bool managed)
  101. {
  102. unsigned int area, start = m->alloc_start;
  103. unsigned int end = m->alloc_end;
  104. bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
  105. bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
  106. area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
  107. if (area >= end)
  108. return area;
  109. if (managed)
  110. bitmap_set(cm->managed_map, area, num);
  111. else
  112. bitmap_set(cm->alloc_map, area, num);
  113. return area;
  114. }
  115. /* Find the best CPU which has the lowest vector allocation count */
  116. static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
  117. const struct cpumask *msk)
  118. {
  119. unsigned int cpu, best_cpu, maxavl = 0;
  120. struct cpumap *cm;
  121. best_cpu = UINT_MAX;
  122. for_each_cpu(cpu, msk) {
  123. cm = per_cpu_ptr(m->maps, cpu);
  124. if (!cm->online || cm->available <= maxavl)
  125. continue;
  126. best_cpu = cpu;
  127. maxavl = cm->available;
  128. }
  129. return best_cpu;
  130. }
  131. /* Find the best CPU which has the lowest number of managed IRQs allocated */
  132. static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
  133. const struct cpumask *msk)
  134. {
  135. unsigned int cpu, best_cpu, allocated = UINT_MAX;
  136. struct cpumap *cm;
  137. best_cpu = UINT_MAX;
  138. for_each_cpu(cpu, msk) {
  139. cm = per_cpu_ptr(m->maps, cpu);
  140. if (!cm->online || cm->managed_allocated > allocated)
  141. continue;
  142. best_cpu = cpu;
  143. allocated = cm->managed_allocated;
  144. }
  145. return best_cpu;
  146. }
  147. /**
  148. * irq_matrix_assign_system - Assign system wide entry in the matrix
  149. * @m: Matrix pointer
  150. * @bit: Which bit to reserve
  151. * @replace: Replace an already allocated vector with a system
  152. * vector at the same bit position.
  153. *
  154. * The BUG_ON()s below are on purpose. If this goes wrong in the
  155. * early boot process, then the chance to survive is about zero.
  156. * If this happens when the system is life, it's not much better.
  157. */
  158. void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
  159. bool replace)
  160. {
  161. struct cpumap *cm = this_cpu_ptr(m->maps);
  162. BUG_ON(bit > m->matrix_bits);
  163. BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));
  164. set_bit(bit, m->system_map);
  165. if (replace) {
  166. BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
  167. cm->allocated--;
  168. m->total_allocated--;
  169. }
  170. if (bit >= m->alloc_start && bit < m->alloc_end)
  171. m->systembits_inalloc++;
  172. trace_irq_matrix_assign_system(bit, m);
  173. }
/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs
 *
 * Return: 0 on success, -ENOSPC when a bit could not be reserved on every
 * CPU in @msk. On failure all reservations made by this call are undone.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		/* Claim one free bit in this CPU's managed map. */
		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		/* Offline CPUs are accounted when they come online. */
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	/* Roll back the CPUs which were handled before the failing one. */
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs
 *
 * This removes not allocated managed interrupts from the map. It does
 * not matter which one because the managed interrupts free their
 * allocation when they shut down. If not, the accounting is screwed,
 * but all what can be done at this point is warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		/* Nothing reserved on this CPU: accounting is broken. */
		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get managed bit which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		/* All reserved bits are in use: cannot remove any. */
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);

		cm->managed--;
		/* The freed bit becomes generally allocatable again. */
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}
  243. /**
  244. * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
  245. * @m: Matrix pointer
  246. * @msk: Which CPUs to search in
  247. * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
  248. */
  249. int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
  250. unsigned int *mapped_cpu)
  251. {
  252. unsigned int bit, cpu, end;
  253. struct cpumap *cm;
  254. if (cpumask_empty(msk))
  255. return -EINVAL;
  256. cpu = matrix_find_best_cpu_managed(m, msk);
  257. if (cpu == UINT_MAX)
  258. return -ENOSPC;
  259. cm = per_cpu_ptr(m->maps, cpu);
  260. end = m->alloc_end;
  261. /* Get managed bit which are not allocated */
  262. bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
  263. bit = find_first_bit(m->scratch_map, end);
  264. if (bit >= end)
  265. return -ENOSPC;
  266. set_bit(bit, cm->alloc_map);
  267. cm->allocated++;
  268. cm->managed_allocated++;
  269. m->total_allocated++;
  270. *mapped_cpu = cpu;
  271. trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
  272. return bit;
  273. }
  274. /**
  275. * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
  276. * @m: Matrix pointer
  277. * @bit: Which bit to mark
  278. *
  279. * This should only be used to mark preallocated vectors
  280. */
  281. void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
  282. {
  283. struct cpumap *cm = this_cpu_ptr(m->maps);
  284. if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
  285. return;
  286. if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
  287. return;
  288. cm->allocated++;
  289. m->total_allocated++;
  290. cm->available--;
  291. m->global_available--;
  292. trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
  293. }
  294. /**
  295. * irq_matrix_reserve - Reserve interrupts
  296. * @m: Matrix pointer
  297. *
  298. * This is merely a book keeping call. It increments the number of globally
  299. * reserved interrupt bits w/o actually allocating them. This allows to
  300. * setup interrupt descriptors w/o assigning low level resources to it.
  301. * The actual allocation happens when the interrupt gets activated.
  302. */
  303. void irq_matrix_reserve(struct irq_matrix *m)
  304. {
  305. if (m->global_reserved == m->global_available)
  306. pr_warn("Interrupt reservation exceeds available resources\n");
  307. m->global_reserved++;
  308. trace_irq_matrix_reserve(m);
  309. }
  310. /**
  311. * irq_matrix_remove_reserved - Remove interrupt reservation
  312. * @m: Matrix pointer
  313. *
  314. * This is merely a book keeping call. It decrements the number of globally
  315. * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
  316. * interrupt was never in use and a real vector allocated, which undid the
  317. * reservation.
  318. */
  319. void irq_matrix_remove_reserved(struct irq_matrix *m)
  320. {
  321. m->global_reserved--;
  322. trace_irq_matrix_remove_reserved(m);
  323. }
  324. /**
  325. * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
  326. * @m: Matrix pointer
  327. * @msk: Which CPUs to search in
  328. * @reserved: Allocate previously reserved interrupts
  329. * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
  330. */
  331. int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
  332. bool reserved, unsigned int *mapped_cpu)
  333. {
  334. unsigned int cpu, bit;
  335. struct cpumap *cm;
  336. /*
  337. * Not required in theory, but matrix_find_best_cpu() uses
  338. * for_each_cpu() which ignores the cpumask on UP .
  339. */
  340. if (cpumask_empty(msk))
  341. return -EINVAL;
  342. cpu = matrix_find_best_cpu(m, msk);
  343. if (cpu == UINT_MAX)
  344. return -ENOSPC;
  345. cm = per_cpu_ptr(m->maps, cpu);
  346. bit = matrix_alloc_area(m, cm, 1, false);
  347. if (bit >= m->alloc_end)
  348. return -ENOSPC;
  349. cm->allocated++;
  350. cm->available--;
  351. m->total_allocated++;
  352. m->global_available--;
  353. if (reserved)
  354. m->global_reserved--;
  355. *mapped_cpu = cpu;
  356. trace_irq_matrix_alloc(bit, cpu, m, cm);
  357. return bit;
  358. }
  359. /**
  360. * irq_matrix_free - Free allocated interrupt in the matrix
  361. * @m: Matrix pointer
  362. * @cpu: Which CPU map needs be updated
  363. * @bit: The bit to remove
  364. * @managed: If true, the interrupt is managed and not accounted
  365. * as available.
  366. */
  367. void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
  368. unsigned int bit, bool managed)
  369. {
  370. struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
  371. if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
  372. return;
  373. if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
  374. return;
  375. cm->allocated--;
  376. if(managed)
  377. cm->managed_allocated--;
  378. if (cm->online)
  379. m->total_allocated--;
  380. if (!managed) {
  381. cm->available++;
  382. if (cm->online)
  383. m->global_available++;
  384. }
  385. trace_irq_matrix_free(bit, cpu, m, cm);
  386. }
  387. /**
  388. * irq_matrix_available - Get the number of globally available irqs
  389. * @m: Pointer to the matrix to query
  390. * @cpudown: If true, the local CPU is about to go down, adjust
  391. * the number of available irqs accordingly
  392. */
  393. unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
  394. {
  395. struct cpumap *cm = this_cpu_ptr(m->maps);
  396. if (!cpudown)
  397. return m->global_available;
  398. return m->global_available - cm->available;
  399. }
/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:	Pointer to the matrix to query
 *
 * Simple accessor for the reservation counter maintained by
 * irq_matrix_reserve() / irq_matrix_remove_reserved().
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}
  408. /**
  409. * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU
  410. * @m: Pointer to the matrix to search
  411. *
  412. * This returns number of allocated non-managed interrupts.
  413. */
  414. unsigned int irq_matrix_allocated(struct irq_matrix *m)
  415. {
  416. struct cpumap *cm = this_cpu_ptr(m->maps);
  417. return cm->allocated - cm->managed_allocated;
  418. }
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note, this is a lockless snapshot: the counters and bitmaps are read
 * without serialization against concurrent allocation, so the dump may be
 * slightly inconsistent. Good enough for debugfs.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps:   %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved:  %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated:  %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
	/* Hold off hotplug so per_cpu_ptr() stays valid during the walk. */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d  %4u  %4u  %4u  %4u  %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed,
			   cm->managed_allocated, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif