irq.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */

#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/scs.h>
#include <linux/seq_file.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/softirq_stack.h>
#include <asm/stacktrace.h>
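
/*
 * The hart-local interrupt controller (INTC) driver registers a callback
 * here so that riscv_get_intc_hwnode() can hand the INTC's fwnode to
 * callers (e.g. other irqchip drivers) without a direct dependency on the
 * INTC driver itself.
 */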
static struct fwnode_handle *(*__get_intc_node)(void);

void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void))
{
	__get_intc_node = fn;
}

struct fwnode_handle *riscv_get_intc_hwnode(void)
{
	if (__get_intc_node)
		return __get_intc_node();

	return NULL;
}
EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
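
/*
 * Optional dedicated IRQ stacks: with CONFIG_IRQ_STACKS the kernel sets up
 * a separate per-CPU stack (and, with CONFIG_SHADOW_CALL_STACK, a per-CPU
 * shadow call stack) for interrupt handling instead of reusing the
 * interrupted task's kernel stack.
 */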
#ifdef CONFIG_IRQ_STACKS
#include <asm/irq_stack.h>

DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
#endif
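
/* Allocate a per-CPU shadow call stack for IRQ handling, if SCS is enabled. */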
static void init_irq_scs(void)
{
	int cpu;

	if (!scs_is_enabled())
		return;

	for_each_possible_cpu(cpu)
		per_cpu(irq_shadow_call_stack_ptr, cpu) =
			scs_alloc(cpu_to_node(cpu));
}
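
/*
 * Per-CPU IRQ stacks: allocated from the vmalloc area when CONFIG_VMAP_STACK
 * is enabled, otherwise taken from a statically sized per-CPU array.
 */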
DEFINE_PER_CPU(ulong *, irq_stack_ptr);

#ifdef CONFIG_VMAP_STACK
static void init_irq_stacks(void)
{
	int cpu;
	ulong *p;

	for_each_possible_cpu(cpu) {
		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
		per_cpu(irq_stack_ptr, cpu) = p;
	}
}
#else
/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
DEFINE_PER_CPU_ALIGNED(ulong [IRQ_STACK_SIZE/sizeof(ulong)], irq_stack);

static void init_irq_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
}
#endif /* CONFIG_VMAP_STACK */
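
/*
 * When softirqs get their own stack, run them on the per-CPU IRQ stack if we
 * are currently on the task (thread) stack; otherwise we are already off the
 * thread stack and __do_softirq() is called directly.
 */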
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static void ___do_softirq(struct pt_regs *regs)
{
	__do_softirq();
}

void do_softirq_own_stack(void)
{
	if (on_thread_stack())
		call_on_irq_stack(NULL, ___do_softirq);
	else
		__do_softirq();
}
#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */

#else
static void init_irq_scs(void) {}
static void init_irq_stacks(void) {}
#endif /* CONFIG_IRQ_STACKS */
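
/* Contribute the per-CPU IPI statistics to /proc/interrupts. */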
int arch_show_interrupts(struct seq_file *p, int prec)
{
	show_ipi_stats(p, prec);
	return 0;
}
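
/*
 * Boot-time interrupt setup: prepare the (shadow) IRQ stacks, probe the
 * interrupt controllers via irqchip_init(), panic if no root handler
 * (handle_arch_irq) was installed, then initialize SBI-based IPIs.
 */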
void __init init_IRQ(void)
{
	init_irq_scs();
	init_irq_stacks();
	irqchip_init();
	if (!handle_arch_irq)
		panic("No interrupt controller found.");
	sbi_ipi_init();
}