/* irq-armada-370-xp.c */
  1. /*
  2. * Marvell Armada 370 and Armada XP SoC IRQ handling
  3. *
  4. * Copyright (C) 2012 Marvell
  5. *
  6. * Lior Amsalem <alior@marvell.com>
  7. * Gregory CLEMENT <gregory.clement@free-electrons.com>
  8. * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
  9. * Ben Dooks <ben.dooks@codethink.co.uk>
  10. *
  11. * This file is licensed under the terms of the GNU General Public
  12. * License version 2. This program is licensed "as is" without any
  13. * warranty of any kind, whether express or implied.
  14. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>
  35. /*
  36. * Overall diagram of the Armada XP interrupt controller:
  37. *
 *                        To CPU 0  To CPU 1
 *
 *                           /\       /\
 *                           ||       ||
 *                  +---------------+ +---------------+
 *                  |               | |               |
 *                  |    per-CPU    | |    per-CPU    |
 *                  |  mask/unmask  | |  mask/unmask  |
 *                  |     CPU0      | |     CPU1      |
 *                  |               | |               |
 *                  +---------------+ +---------------+
 *                           /\       /\
 *                           ||       ||
 *                           \\_______________________//
 *                                       ||
 *                            +-------------------+
 *                            |                   |
 *                            |  Global interrupt |
 *                            |    mask/unmask    |
 *                            |                   |
 *                            +-------------------+
 *                                       /\
 *                                       ||
 *                                 interrupt from
 *                                     device
  63. *
  64. * The "global interrupt mask/unmask" is modified using the
  65. * ARMADA_370_XP_INT_SET_ENABLE_OFFS and
  66. * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative
  67. * to "main_int_base".
  68. *
  69. * The "per-CPU mask/unmask" is modified using the
  70. * ARMADA_370_XP_INT_SET_MASK_OFFS and
  71. * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to
  72. * "per_cpu_int_base". This base address points to a special address,
  73. * which automatically accesses the registers of the current CPU.
  74. *
  75. * The per-CPU mask/unmask can also be adjusted using the global
  76. * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use
  77. * to configure interrupt affinity.
  78. *
  79. * Due to this model, all interrupts need to be mask/unmasked at two
  80. * different levels: at the global level and at the per-CPU level.
  81. *
  82. * This driver takes the following approach to deal with this:
  83. *
  84. * - For global interrupts:
  85. *
  86. * At ->map() time, a global interrupt is unmasked at the per-CPU
  87. * mask/unmask level. It is therefore unmasked at this level for
  88. * the current CPU, running the ->map() code. This allows to have
  89. * the interrupt unmasked at this level in non-SMP
  90. * configurations. In SMP configurations, the ->set_affinity()
  91. * callback is called, which using the
  92. * ARMADA_370_XP_INT_SOURCE_CTL() readjusts the per-CPU mask/unmask
  93. * for the interrupt.
  94. *
  95. * The ->mask() and ->unmask() operations only mask/unmask the
  96. * interrupt at the "global" level.
  97. *
  98. * So, a global interrupt is enabled at the per-CPU level as soon
  99. * as it is mapped. At run time, the masking/unmasking takes place
  100. * at the global level.
  101. *
  102. * - For per-CPU interrupts
  103. *
  104. * At ->map() time, a per-CPU interrupt is unmasked at the global
  105. * mask/unmask level.
  106. *
  107. * The ->mask() and ->unmask() operations mask/unmask the interrupt
  108. * at the per-CPU level.
  109. *
  110. * So, a per-CPU interrupt is enabled at the global level as soon
  111. * as it is mapped. At run time, the masking/unmasking takes place
  112. * at the per-CPU level.
  113. */
  114. /* Registers relative to main_int_base */
  115. #define ARMADA_370_XP_INT_CONTROL (0x00)
  116. #define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04)
  117. #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
  118. #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
  119. #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
  120. #define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
  121. #define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
  122. /* Registers relative to per_cpu_int_base */
  123. #define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x08)
  124. #define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0x0c)
  125. #define ARMADA_375_PPI_CAUSE (0x10)
  126. #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
  127. #define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
  128. #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
  129. #define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54)
  130. #define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu)
  131. #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
  132. #define IPI_DOORBELL_START (0)
  133. #define IPI_DOORBELL_END (8)
  134. #define IPI_DOORBELL_MASK 0xFF
  135. #define PCI_MSI_DOORBELL_START (16)
  136. #define PCI_MSI_DOORBELL_NR (16)
  137. #define PCI_MSI_DOORBELL_END (32)
  138. #define PCI_MSI_DOORBELL_MASK 0xFFFF0000
  139. static void __iomem *per_cpu_int_base;
  140. static void __iomem *main_int_base;
  141. static struct irq_domain *armada_370_xp_mpic_domain;
  142. static u32 doorbell_mask_reg;
  143. static int parent_irq;
  144. #ifdef CONFIG_PCI_MSI
  145. static struct irq_domain *armada_370_xp_msi_domain;
  146. static struct irq_domain *armada_370_xp_msi_inner_domain;
  147. static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
  148. static DEFINE_MUTEX(msi_used_lock);
  149. static phys_addr_t msi_doorbell_addr;
  150. #endif
  151. static inline bool is_percpu_irq(irq_hw_number_t irq)
  152. {
  153. if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
  154. return true;
  155. return false;
  156. }
  157. /*
  158. * In SMP mode:
  159. * For shared global interrupts, mask/unmask global enable bit
  160. * For CPU interrupts, mask/unmask the calling CPU's bit
  161. */
  162. static void armada_370_xp_irq_mask(struct irq_data *d)
  163. {
  164. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  165. if (!is_percpu_irq(hwirq))
  166. writel(hwirq, main_int_base +
  167. ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
  168. else
  169. writel(hwirq, per_cpu_int_base +
  170. ARMADA_370_XP_INT_SET_MASK_OFFS);
  171. }
  172. static void armada_370_xp_irq_unmask(struct irq_data *d)
  173. {
  174. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  175. if (!is_percpu_irq(hwirq))
  176. writel(hwirq, main_int_base +
  177. ARMADA_370_XP_INT_SET_ENABLE_OFFS);
  178. else
  179. writel(hwirq, per_cpu_int_base +
  180. ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
  181. }
  182. #ifdef CONFIG_PCI_MSI
  183. static struct irq_chip armada_370_xp_msi_irq_chip = {
  184. .name = "MPIC MSI",
  185. .irq_mask = pci_msi_mask_irq,
  186. .irq_unmask = pci_msi_unmask_irq,
  187. };
  188. static struct msi_domain_info armada_370_xp_msi_domain_info = {
  189. .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
  190. MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
  191. .chip = &armada_370_xp_msi_irq_chip,
  192. };
  193. static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
  194. {
  195. msg->address_lo = lower_32_bits(msi_doorbell_addr);
  196. msg->address_hi = upper_32_bits(msi_doorbell_addr);
  197. msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START);
  198. }
  199. static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
  200. const struct cpumask *mask, bool force)
  201. {
  202. return -EINVAL;
  203. }
  204. static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
  205. .name = "MPIC MSI",
  206. .irq_compose_msi_msg = armada_370_xp_compose_msi_msg,
  207. .irq_set_affinity = armada_370_xp_msi_set_affinity,
  208. };
  209. static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
  210. unsigned int nr_irqs, void *args)
  211. {
  212. int hwirq, i;
  213. mutex_lock(&msi_used_lock);
  214. hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
  215. 0, nr_irqs, 0);
  216. if (hwirq >= PCI_MSI_DOORBELL_NR) {
  217. mutex_unlock(&msi_used_lock);
  218. return -ENOSPC;
  219. }
  220. bitmap_set(msi_used, hwirq, nr_irqs);
  221. mutex_unlock(&msi_used_lock);
  222. for (i = 0; i < nr_irqs; i++) {
  223. irq_domain_set_info(domain, virq + i, hwirq + i,
  224. &armada_370_xp_msi_bottom_irq_chip,
  225. domain->host_data, handle_simple_irq,
  226. NULL, NULL);
  227. }
  228. return hwirq;
  229. }
  230. static void armada_370_xp_msi_free(struct irq_domain *domain,
  231. unsigned int virq, unsigned int nr_irqs)
  232. {
  233. struct irq_data *d = irq_domain_get_irq_data(domain, virq);
  234. mutex_lock(&msi_used_lock);
  235. bitmap_clear(msi_used, d->hwirq, nr_irqs);
  236. mutex_unlock(&msi_used_lock);
  237. }
  238. static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
  239. .alloc = armada_370_xp_msi_alloc,
  240. .free = armada_370_xp_msi_free,
  241. };
  242. static int armada_370_xp_msi_init(struct device_node *node,
  243. phys_addr_t main_int_phys_base)
  244. {
  245. u32 reg;
  246. msi_doorbell_addr = main_int_phys_base +
  247. ARMADA_370_XP_SW_TRIG_INT_OFFS;
  248. armada_370_xp_msi_inner_domain =
  249. irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
  250. &armada_370_xp_msi_domain_ops, NULL);
  251. if (!armada_370_xp_msi_inner_domain)
  252. return -ENOMEM;
  253. armada_370_xp_msi_domain =
  254. pci_msi_create_irq_domain(of_node_to_fwnode(node),
  255. &armada_370_xp_msi_domain_info,
  256. armada_370_xp_msi_inner_domain);
  257. if (!armada_370_xp_msi_domain) {
  258. irq_domain_remove(armada_370_xp_msi_inner_domain);
  259. return -ENOMEM;
  260. }
  261. reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
  262. | PCI_MSI_DOORBELL_MASK;
  263. writel(reg, per_cpu_int_base +
  264. ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
  265. /* Unmask IPI interrupt */
  266. writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
  267. return 0;
  268. }
  269. #else
  270. static inline int armada_370_xp_msi_init(struct device_node *node,
  271. phys_addr_t main_int_phys_base)
  272. {
  273. return 0;
  274. }
  275. #endif
  276. #ifdef CONFIG_SMP
  277. static DEFINE_RAW_SPINLOCK(irq_controller_lock);
  278. static int armada_xp_set_affinity(struct irq_data *d,
  279. const struct cpumask *mask_val, bool force)
  280. {
  281. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  282. unsigned long reg, mask;
  283. int cpu;
  284. /* Select a single core from the affinity mask which is online */
  285. cpu = cpumask_any_and(mask_val, cpu_online_mask);
  286. mask = 1UL << cpu_logical_map(cpu);
  287. raw_spin_lock(&irq_controller_lock);
  288. reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
  289. reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
  290. writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
  291. raw_spin_unlock(&irq_controller_lock);
  292. irq_data_update_effective_affinity(d, cpumask_of(cpu));
  293. return IRQ_SET_MASK_OK;
  294. }
  295. #endif
  296. static struct irq_chip armada_370_xp_irq_chip = {
  297. .name = "MPIC",
  298. .irq_mask = armada_370_xp_irq_mask,
  299. .irq_mask_ack = armada_370_xp_irq_mask,
  300. .irq_unmask = armada_370_xp_irq_unmask,
  301. #ifdef CONFIG_SMP
  302. .irq_set_affinity = armada_xp_set_affinity,
  303. #endif
  304. .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
  305. };
  306. static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
  307. unsigned int virq, irq_hw_number_t hw)
  308. {
  309. armada_370_xp_irq_mask(irq_get_irq_data(virq));
  310. if (!is_percpu_irq(hw))
  311. writel(hw, per_cpu_int_base +
  312. ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
  313. else
  314. writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
  315. irq_set_status_flags(virq, IRQ_LEVEL);
  316. if (is_percpu_irq(hw)) {
  317. irq_set_percpu_devid(virq);
  318. irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
  319. handle_percpu_devid_irq);
  320. } else {
  321. irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
  322. handle_level_irq);
  323. irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
  324. }
  325. irq_set_probe(virq);
  326. return 0;
  327. }
  328. static void armada_xp_mpic_smp_cpu_init(void)
  329. {
  330. u32 control;
  331. int nr_irqs, i;
  332. control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
  333. nr_irqs = (control >> 2) & 0x3ff;
  334. for (i = 0; i < nr_irqs; i++)
  335. writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
  336. /* Clear pending IPIs */
  337. writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
  338. /* Enable first 8 IPIs */
  339. writel(IPI_DOORBELL_MASK, per_cpu_int_base +
  340. ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
  341. /* Unmask IPI interrupt */
  342. writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
  343. }
  344. static void armada_xp_mpic_perf_init(void)
  345. {
  346. unsigned long cpuid = cpu_logical_map(smp_processor_id());
  347. /* Enable Performance Counter Overflow interrupts */
  348. writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
  349. per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
  350. }
  351. #ifdef CONFIG_SMP
  352. static void armada_mpic_send_doorbell(const struct cpumask *mask,
  353. unsigned int irq)
  354. {
  355. int cpu;
  356. unsigned long map = 0;
  357. /* Convert our logical CPU mask into a physical one. */
  358. for_each_cpu(cpu, mask)
  359. map |= 1 << cpu_logical_map(cpu);
  360. /*
  361. * Ensure that stores to Normal memory are visible to the
  362. * other CPUs before issuing the IPI.
  363. */
  364. dsb();
  365. /* submit softirq */
  366. writel((map << 8) | irq, main_int_base +
  367. ARMADA_370_XP_SW_TRIG_INT_OFFS);
  368. }
  369. static void armada_xp_mpic_reenable_percpu(void)
  370. {
  371. unsigned int irq;
  372. /* Re-enable per-CPU interrupts that were enabled before suspend */
  373. for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
  374. struct irq_data *data;
  375. int virq;
  376. virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
  377. if (virq == 0)
  378. continue;
  379. data = irq_get_irq_data(virq);
  380. if (!irq_percpu_is_enabled(virq))
  381. continue;
  382. armada_370_xp_irq_unmask(data);
  383. }
  384. }
  385. static int armada_xp_mpic_starting_cpu(unsigned int cpu)
  386. {
  387. armada_xp_mpic_perf_init();
  388. armada_xp_mpic_smp_cpu_init();
  389. armada_xp_mpic_reenable_percpu();
  390. return 0;
  391. }
  392. static int mpic_cascaded_starting_cpu(unsigned int cpu)
  393. {
  394. armada_xp_mpic_perf_init();
  395. armada_xp_mpic_reenable_percpu();
  396. enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
  397. return 0;
  398. }
  399. #endif
  400. static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
  401. .map = armada_370_xp_mpic_irq_map,
  402. .xlate = irq_domain_xlate_onecell,
  403. };
  404. #ifdef CONFIG_PCI_MSI
  405. static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
  406. {
  407. u32 msimask, msinr;
  408. msimask = readl_relaxed(per_cpu_int_base +
  409. ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
  410. & PCI_MSI_DOORBELL_MASK;
  411. writel(~msimask, per_cpu_int_base +
  412. ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
  413. for (msinr = PCI_MSI_DOORBELL_START;
  414. msinr < PCI_MSI_DOORBELL_END; msinr++) {
  415. int irq;
  416. if (!(msimask & BIT(msinr)))
  417. continue;
  418. if (is_chained) {
  419. irq = irq_find_mapping(armada_370_xp_msi_inner_domain,
  420. msinr - PCI_MSI_DOORBELL_START);
  421. generic_handle_irq(irq);
  422. } else {
  423. irq = msinr - PCI_MSI_DOORBELL_START;
  424. handle_domain_irq(armada_370_xp_msi_inner_domain,
  425. irq, regs);
  426. }
  427. }
  428. }
  429. #else
  430. static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
  431. #endif
  432. static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
  433. {
  434. struct irq_chip *chip = irq_desc_get_chip(desc);
  435. unsigned long irqmap, irqn, irqsrc, cpuid;
  436. unsigned int cascade_irq;
  437. chained_irq_enter(chip, desc);
  438. irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
  439. cpuid = cpu_logical_map(smp_processor_id());
  440. for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
  441. irqsrc = readl_relaxed(main_int_base +
  442. ARMADA_370_XP_INT_SOURCE_CTL(irqn));
  443. /* Check if the interrupt is not masked on current CPU.
  444. * Test IRQ (0-1) and FIQ (8-9) mask bits.
  445. */
  446. if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
  447. continue;
  448. if (irqn == 1) {
  449. armada_370_xp_handle_msi_irq(NULL, true);
  450. continue;
  451. }
  452. cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
  453. generic_handle_irq(cascade_irq);
  454. }
  455. chained_irq_exit(chip, desc);
  456. }
  457. static void __exception_irq_entry
  458. armada_370_xp_handle_irq(struct pt_regs *regs)
  459. {
  460. u32 irqstat, irqnr;
  461. do {
  462. irqstat = readl_relaxed(per_cpu_int_base +
  463. ARMADA_370_XP_CPU_INTACK_OFFS);
  464. irqnr = irqstat & 0x3FF;
  465. if (irqnr > 1022)
  466. break;
  467. if (irqnr > 1) {
  468. handle_domain_irq(armada_370_xp_mpic_domain,
  469. irqnr, regs);
  470. continue;
  471. }
  472. /* MSI handling */
  473. if (irqnr == 1)
  474. armada_370_xp_handle_msi_irq(regs, false);
  475. #ifdef CONFIG_SMP
  476. /* IPI Handling */
  477. if (irqnr == 0) {
  478. u32 ipimask, ipinr;
  479. ipimask = readl_relaxed(per_cpu_int_base +
  480. ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
  481. & IPI_DOORBELL_MASK;
  482. writel(~ipimask, per_cpu_int_base +
  483. ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
  484. /* Handle all pending doorbells */
  485. for (ipinr = IPI_DOORBELL_START;
  486. ipinr < IPI_DOORBELL_END; ipinr++) {
  487. if (ipimask & (0x1 << ipinr))
  488. handle_IPI(ipinr, regs);
  489. }
  490. continue;
  491. }
  492. #endif
  493. } while (1);
  494. }
  495. static int armada_370_xp_mpic_suspend(void)
  496. {
  497. doorbell_mask_reg = readl(per_cpu_int_base +
  498. ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
  499. return 0;
  500. }
  501. static void armada_370_xp_mpic_resume(void)
  502. {
  503. int nirqs;
  504. irq_hw_number_t irq;
  505. /* Re-enable interrupts */
  506. nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
  507. for (irq = 0; irq < nirqs; irq++) {
  508. struct irq_data *data;
  509. int virq;
  510. virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
  511. if (virq == 0)
  512. continue;
  513. data = irq_get_irq_data(virq);
  514. if (!is_percpu_irq(irq)) {
  515. /* Non per-CPU interrupts */
  516. writel(irq, per_cpu_int_base +
  517. ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
  518. if (!irqd_irq_disabled(data))
  519. armada_370_xp_irq_unmask(data);
  520. } else {
  521. /* Per-CPU interrupts */
  522. writel(irq, main_int_base +
  523. ARMADA_370_XP_INT_SET_ENABLE_OFFS);
  524. /*
  525. * Re-enable on the current CPU,
  526. * armada_xp_mpic_reenable_percpu() will take
  527. * care of secondary CPUs when they come up.
  528. */
  529. if (irq_percpu_is_enabled(virq))
  530. armada_370_xp_irq_unmask(data);
  531. }
  532. }
  533. /* Reconfigure doorbells for IPIs and MSIs */
  534. writel(doorbell_mask_reg,
  535. per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
  536. if (doorbell_mask_reg & IPI_DOORBELL_MASK)
  537. writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
  538. if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
  539. writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
  540. }
  541. static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
  542. .suspend = armada_370_xp_mpic_suspend,
  543. .resume = armada_370_xp_mpic_resume,
  544. };
  545. static int __init armada_370_xp_mpic_of_init(struct device_node *node,
  546. struct device_node *parent)
  547. {
  548. struct resource main_int_res, per_cpu_int_res;
  549. int nr_irqs, i;
  550. u32 control;
  551. BUG_ON(of_address_to_resource(node, 0, &main_int_res));
  552. BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
  553. BUG_ON(!request_mem_region(main_int_res.start,
  554. resource_size(&main_int_res),
  555. node->full_name));
  556. BUG_ON(!request_mem_region(per_cpu_int_res.start,
  557. resource_size(&per_cpu_int_res),
  558. node->full_name));
  559. main_int_base = ioremap(main_int_res.start,
  560. resource_size(&main_int_res));
  561. BUG_ON(!main_int_base);
  562. per_cpu_int_base = ioremap(per_cpu_int_res.start,
  563. resource_size(&per_cpu_int_res));
  564. BUG_ON(!per_cpu_int_base);
  565. control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
  566. nr_irqs = (control >> 2) & 0x3ff;
  567. for (i = 0; i < nr_irqs; i++)
  568. writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
  569. armada_370_xp_mpic_domain =
  570. irq_domain_add_linear(node, nr_irqs,
  571. &armada_370_xp_mpic_irq_ops, NULL);
  572. BUG_ON(!armada_370_xp_mpic_domain);
  573. irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
  574. /* Setup for the boot CPU */
  575. armada_xp_mpic_perf_init();
  576. armada_xp_mpic_smp_cpu_init();
  577. armada_370_xp_msi_init(node, main_int_res.start);
  578. parent_irq = irq_of_parse_and_map(node, 0);
  579. if (parent_irq <= 0) {
  580. irq_set_default_host(armada_370_xp_mpic_domain);
  581. set_handle_irq(armada_370_xp_handle_irq);
  582. #ifdef CONFIG_SMP
  583. set_smp_cross_call(armada_mpic_send_doorbell);
  584. cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
  585. "irqchip/armada/ipi:starting",
  586. armada_xp_mpic_starting_cpu, NULL);
  587. #endif
  588. } else {
  589. #ifdef CONFIG_SMP
  590. cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
  591. "irqchip/armada/cascade:starting",
  592. mpic_cascaded_starting_cpu, NULL);
  593. #endif
  594. irq_set_chained_handler(parent_irq,
  595. armada_370_xp_mpic_handle_cascade_irq);
  596. }
  597. register_syscore_ops(&armada_370_xp_mpic_syscore_ops);
  598. return 0;
  599. }
  600. IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);