// kernel/irq/proc.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
  4. *
  5. * This file contains the /proc/irq/ handling code.
  6. */
  7. #include <linux/irq.h>
  8. #include <linux/gfp.h>
  9. #include <linux/proc_fs.h>
  10. #include <linux/seq_file.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/kernel_stat.h>
  13. #include <linux/mutex.h>
  14. #include "internals.h"
  15. /*
  16. * Access rules:
  17. *
  18. * procfs protects read/write of /proc/irq/N/ files against a
  19. * concurrent free of the interrupt descriptor. remove_proc_entry()
  20. * immediately prevents new read/writes to happen and waits for
  21. * already running read/write functions to complete.
  22. *
  23. * We remove the proc entries first and then delete the interrupt
  24. * descriptor from the radix tree and free it. So it is guaranteed
  25. * that irq_to_desc(N) is valid as long as the read/writes are
  26. * permitted by procfs.
  27. *
  28. * The read from /proc/interrupts is a different problem because there
  29. * is no protection. So the lookup and the access to irqdesc
  30. * information must be protected by sparse_irq_lock.
  31. */
/* /proc/irq directory; created once in init_irq_proc() */
static struct proc_dir_entry *root_irq_dir;
  33. #ifdef CONFIG_SMP
/*
 * File type selector for show_irq_affinity(): which mask to print and
 * whether to print it as a hex bitmask or as a CPU list.
 */
enum {
	AFFINITY,	/* smp_affinity: configured mask, bitmask format */
	AFFINITY_LIST,	/* smp_affinity_list: configured mask, list format */
	EFFECTIVE,	/* effective_affinity: bitmask format */
	EFFECTIVE_LIST,	/* effective_affinity_list: list format */
};
/*
 * Common show routine for the per-irq affinity proc files.
 *
 * @type selects both the mask that is printed (configured vs. effective)
 * and the output format (bitmask vs. CPU list).  m->private carries the
 * irq number, stashed there at proc file creation time.
 */
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
		/* A move is pending: show the target mask, not the stale one */
		if (irq_move_pending(&desc->irq_data))
			mask = irq_desc_get_pending_mask(desc);
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
		/*
		 * Without CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK the EFFECTIVE*
		 * cases deliberately fall through to the -EINVAL default.
		 */
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}
  72. static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
  73. {
  74. struct irq_desc *desc = irq_to_desc((long)m->private);
  75. unsigned long flags;
  76. cpumask_var_t mask;
  77. if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
  78. return -ENOMEM;
  79. raw_spin_lock_irqsave(&desc->lock, flags);
  80. if (desc->affinity_hint)
  81. cpumask_copy(mask, desc->affinity_hint);
  82. raw_spin_unlock_irqrestore(&desc->lock, flags);
  83. seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
  84. free_cpumask_var(mask);
  85. return 0;
  86. }
/* When non-zero, user space affinity writes are rejected (see write_irq_affinity()) */
int no_irq_affinity;

/* /proc/irq/<irq>/smp_affinity: configured mask, bitmask format */
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

/* /proc/irq/<irq>/smp_affinity_list: configured mask, CPU list format */
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Fallback for architectures without an auto affinity selector: writing
 * an empty/offline mask cannot be honored, so always fail.
 */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is assigned to an online CPU already. There is no
	 * point to move it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not then any change to the affinity is pointless because the
	 * startup code invokes irq_setup_affinity() which will select
	 * a online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif
/*
 * Common write handler for smp_affinity (@type == 0, bitmask format) and
 * smp_affinity_list (@type == 1, CPU list format).
 *
 * Returns @count on success, -EPERM when user space may not change the
 * affinity, -ENOMEM on allocation failure, a parse error or -EINVAL
 * otherwise.
 */
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)pde_data(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EPERM;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}
/* Write handler for /proc/irq/<irq>/smp_affinity (bitmask format) */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

/* Write handler for /proc/irq/<irq>/smp_affinity_list (CPU list format) */
static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}
/* Open handler: hand the irq number (pde_data) to the seq_file show routine */
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, pde_data(inode));
}

/* Same for the CPU-list flavored file */
static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}
/* proc_ops for /proc/irq/<irq>/smp_affinity */
static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open	= irq_affinity_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_proc_write,
};

/* proc_ops for /proc/irq/<irq>/smp_affinity_list */
static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open	= irq_affinity_list_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_list_proc_write,
};
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
/* /proc/irq/<irq>/effective_affinity: effective mask, bitmask format */
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

/* /proc/irq/<irq>/effective_affinity_list: effective mask, CPU list format */
static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif
/* Read side of /proc/irq/default_smp_affinity: print the default mask */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}
  201. static ssize_t default_affinity_write(struct file *file,
  202. const char __user *buffer, size_t count, loff_t *ppos)
  203. {
  204. cpumask_var_t new_value;
  205. int err;
  206. if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
  207. return -ENOMEM;
  208. err = cpumask_parse_user(buffer, count, new_value);
  209. if (err)
  210. goto out;
  211. /*
  212. * Do not allow disabling IRQs completely - it's a too easy
  213. * way to make the system unusable accidentally :-) At least
  214. * one online CPU still has to be targeted.
  215. */
  216. if (!cpumask_intersects(new_value, cpu_online_mask)) {
  217. err = -EINVAL;
  218. goto out;
  219. }
  220. cpumask_copy(irq_default_affinity, new_value);
  221. err = count;
  222. out:
  223. free_cpumask_var(new_value);
  224. return err;
  225. }
/* Open handler for /proc/irq/default_smp_affinity */
static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, pde_data(inode));
}

/* proc_ops for /proc/irq/default_smp_affinity */
static const struct proc_ops default_affinity_proc_ops = {
	.proc_open	= default_affinity_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= default_affinity_write,
};
/* /proc/irq/<irq>/node: NUMA node of the interrupt descriptor */
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
  243. #endif
  244. static int irq_spurious_proc_show(struct seq_file *m, void *v)
  245. {
  246. struct irq_desc *desc = irq_to_desc((long) m->private);
  247. seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
  248. desc->irq_count, desc->irqs_unhandled,
  249. jiffies_to_msecs(desc->last_unhandled));
  250. return 0;
  251. }
/* Maximum length of a /proc/irq/<irq>/<handler> directory name */
#define MAX_NAMELEN 128

/*
 * Check, under desc->lock, that @new_action's name differs from the name
 * of every other action installed on @irq.  Returns 1 when unique,
 * 0 on a clash.
 */
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
		    !strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
/*
 * Create the /proc/irq/<irq>/<name> directory for a newly installed
 * handler.  Skipped when the irq directory does not exist yet, the
 * action already has a directory, has no name, or the name clashes
 * with another action on the same irq.
 */
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
	    !name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}
#undef MAX_NAMELEN
  282. #define MAX_NAMELEN 10
  283. void register_irq_proc(unsigned int irq, struct irq_desc *desc)
  284. {
  285. static DEFINE_MUTEX(register_lock);
  286. void __maybe_unused *irqp = (void *)(unsigned long) irq;
  287. char name [MAX_NAMELEN];
  288. if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
  289. return;
  290. /*
  291. * irq directories are registered only when a handler is
  292. * added, not when the descriptor is created, so multiple
  293. * tasks might try to register at the same time.
  294. */
  295. mutex_lock(&register_lock);
  296. if (desc->dir)
  297. goto out_unlock;
  298. sprintf(name, "%d", irq);
  299. /* create /proc/irq/1234 */
  300. desc->dir = proc_mkdir(name, root_irq_dir);
  301. if (!desc->dir)
  302. goto out_unlock;
  303. #ifdef CONFIG_SMP
  304. umode_t umode = S_IRUGO;
  305. if (irq_can_set_affinity_usr(desc->irq_data.irq))
  306. umode |= S_IWUSR;
  307. /* create /proc/irq/<irq>/smp_affinity */
  308. proc_create_data("smp_affinity", umode, desc->dir,
  309. &irq_affinity_proc_ops, irqp);
  310. /* create /proc/irq/<irq>/affinity_hint */
  311. proc_create_single_data("affinity_hint", 0444, desc->dir,
  312. irq_affinity_hint_proc_show, irqp);
  313. /* create /proc/irq/<irq>/smp_affinity_list */
  314. proc_create_data("smp_affinity_list", umode, desc->dir,
  315. &irq_affinity_list_proc_ops, irqp);
  316. proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
  317. irqp);
  318. # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
  319. proc_create_single_data("effective_affinity", 0444, desc->dir,
  320. irq_effective_aff_proc_show, irqp);
  321. proc_create_single_data("effective_affinity_list", 0444, desc->dir,
  322. irq_effective_aff_list_proc_show, irqp);
  323. # endif
  324. #endif
  325. proc_create_single_data("spurious", 0444, desc->dir,
  326. irq_spurious_proc_show, (void *)(long)irq);
  327. out_unlock:
  328. mutex_unlock(&register_lock);
  329. }
/*
 * Tear down /proc/irq/<irq>/ and all files below it.  Mirrors
 * register_irq_proc(); no-op when the directory was never created.
 */
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	/* Finally remove the /proc/irq/<irq> directory itself */
	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}
#undef MAX_NAMELEN
/* Remove the /proc/irq/<irq>/<name> directory of a handler */
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}
/* Create /proc/irq/default_smp_affinity (SMP only; 0644: root-writable) */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}
/*
 * Boot-time setup: create /proc/irq, the default affinity file and an
 * entry for every interrupt descriptor that already exists.
 */
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}
  376. #ifdef CONFIG_GENERIC_IRQ_SHOW
/* Weak stub: architectures override this to append their own rows */
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

/* Number of irq rows to emit before handing over to the arch hook */
#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
/*
 * seq_file show callback for /proc/interrupts.
 *
 * Emits one row per visible interrupt: per-CPU counts, chip name, hwirq
 * number, optional trigger type and the action name(s).  The virtual
 * row at index ACTUAL_NR_IRQS is delegated to arch_show_interrupts()
 * for architecture specific counters.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	/* First-column width; computed once at i == 0, reused for later rows */
	static int prec;

	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	/* RCU keeps the descriptor alive across the sparse lookup */
	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		goto outsparse;

	/* Skip rows without a handler, chained irqs and missing stats */
	if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs)
		goto outsparse;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0);

	/* Chip/action fields can change; read them under the descriptor lock */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}

	if (desc->irq_data.domain)
		seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
  445. #endif