ip27-irq.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/irq_cpu.h>

#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/irq_alloc.h>

#include "ip27-common.h"

struct hub_irq_data {
	u64	*irq_mask[2];
	cpuid_t	cpu;
};

static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);

static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);

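/*
 * Grab a free software level (hub hardware IRQ number) from hub_irq_map.
 * find_first_zero_bit() and test_and_set_bit() are not atomic as a pair,
 * so retry if another caller claims the bit first.
 */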
static inline int alloc_level(void)
{
	int level;

again:
	level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
	if (level >= IP27_HUB_IRQ_COUNT)
		return -ENOSPC;

	if (test_and_set_bit(level, hub_irq_map))
		goto again;

	return level;
}

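/*
 * Enable/disable a hub interrupt by updating the owning CPU's software
 * copy of the interrupt enable mask and writing both 64-bit halves back
 * to that CPU's PI_INT_MASK registers on its hub.
 */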
static void enable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}

static void disable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}

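/*
 * Pick a target CPU for this interrupt: the first online CPU in the
 * requested mask, or any online CPU as a fallback. Point irq_mask[] at
 * that CPU's mask register pair on its hub - the _A set for slice 0,
 * the _B set for slice 1.
 */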
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);

	nasid = cpu_to_node(cpu);
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}
}

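/*
 * Mask the interrupt (if it is currently started), switch it to a CPU
 * from the new affinity mask, re-enable it there, and report the
 * effective affinity.
 */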
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		disable_hub_irq(d);

	setup_hub_mask(hd, mask);

	if (irqd_is_started(d))
		enable_hub_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}

static struct irq_chip hub_irq_type = {
	.name = "HUB",
	.irq_mask = disable_hub_irq,
	.irq_unmask = enable_hub_irq,
	.irq_set_affinity = set_affinity_hub_irq,
};

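/*
 * Allocate a single hub interrupt: reserve a free software level, bind
 * it to the Linux virq with the HUB irq_chip and a level-flow handler,
 * route it to a CPU attached to the requesting node's hub, and clear
 * any stale pending state before the interrupt can be used.
 */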
static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct hub_irq_data *hd;
	struct hub_data *hub;
	struct irq_desc *desc;
	int swlevel;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	swlevel = alloc_level();
	if (unlikely(swlevel < 0)) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
			    handle_level_irq, NULL, NULL);

	/* use CPU connected to nearest hub */
	hub = hub_data(info->nasid);
	setup_hub_mask(hd, &hub->h_cpus);
	info->nasid = cpu_to_node(hd->cpu);

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(info->nasid, swlevel);

	desc = irq_to_desc(virq);
	desc->irq_common_data.node = info->nasid;
	cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);

	return 0;
}

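/*
 * Free the per-interrupt chip data allocated in hub_domain_alloc()
 * (the software level itself is not returned to hub_irq_map).
 */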
static void hub_domain_free(struct irq_domain *domain,
			    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd && irqd->chip_data)
		kfree(irqd->chip_data);
}

static const struct irq_domain_ops hub_domain_ops = {
	.alloc = hub_domain_alloc,
	.free  = hub_domain_free,
};

/*
 * This code is unnecessarily complex, because we do
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow. Secondly, we can not just mask the
 * one intr we are do_IRQing, because the non-masked intrs in the
 * first set might intr again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
 * Kanoj 05.13.00
 */
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend0;
	int ret;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);

	pend0 &= mask[0];		/* Pick intrs we should look at */
	if (!pend0)
		return;

#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		generic_smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		domain = irq_desc_get_handler_data(desc);
		ret = generic_handle_domain_irq(domain, __ffs(pend0));
		if (ret)
			spurious_interrupt();
	}

	LOCAL_HUB_L(PI_INT_PEND0);
}

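/*
 * Chained handler for PI_INT_PEND1 (hub interrupts 64-127). Same flow
 * as ip27_do_irq_mask0(), minus the IPI special cases, which all live
 * in the PEND0 range.
 */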
static void ip27_do_irq_mask1(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend1;
	int ret;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);

	pend1 &= mask[1];		/* Pick intrs we should look at */
	if (!pend1)
		return;

	domain = irq_desc_get_handler_data(desc);
	ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
	if (ret)
		spurious_interrupt();

	LOCAL_HUB_L(PI_INT_PEND1);
}

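/*
 * Per-CPU IPI setup: enable this CPU's resched and call-function IPI
 * levels in its software mask, clear any stale pending state and
 * program the local hub's PI_INT_MASK registers for the slice this
 * CPU runs on.
 */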
void install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	int resched, call;

	resched = CPU_RESCHED_A_IRQ + slice;
	set_bit(resched, mask);
	LOCAL_HUB_CLR_INTR(resched);

	call = CPU_CALL_A_IRQ + slice;
	set_bit(call, mask);
	LOCAL_HUB_CLR_INTR(call);

	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
	}
}

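/*
 * Platform IRQ setup: initialise the MIPS CPU interrupt controller,
 * reserve the hub levels that are fixed by hardware or by convention,
 * create the "HUB" linear irqdomain and chain the two hub pending
 * registers (PEND0/PEND1) into it.
 */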
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;

	mips_cpu_irq_init();

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	bitmap_set(hub_irq_map, 0, CPU_CALL_B_IRQ + 1);
	bitmap_set(hub_irq_map, NI_BRDCAST_ERR_A, MSC_PANIC_INTR - NI_BRDCAST_ERR_A + 1);

	fn = irq_domain_alloc_named_fwnode("HUB");
	if (WARN_ON(fn == NULL))
		return;

	domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
					  &hub_domain_ops, NULL);
	if (WARN_ON(domain == NULL))
		return;

	irq_set_default_host(domain);

	irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
					 domain);
	irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
					 domain);
}