/* drivers/irqchip/irq-apple-aic.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright The Asahi Linux Contributors
  4. *
  5. * Based on irq-lpc32xx:
  6. * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
  7. * Based on irq-bcm2836:
  8. * Copyright 2015 Broadcom
  9. */
  10. /*
  11. * AIC is a fairly simple interrupt controller with the following features:
  12. *
  13. * - 896 level-triggered hardware IRQs
  14. * - Single mask bit per IRQ
  15. * - Per-IRQ affinity setting
  16. * - Automatic masking on event delivery (auto-ack)
  17. * - Software triggering (ORed with hw line)
  18. * - 2 per-CPU IPIs (meant as "self" and "other", but they are
  19. * interchangeable if not symmetric)
  20. * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
  21. * higher priority)
  22. * - Automatic masking on ack
  23. * - Default "this CPU" register view and explicit per-CPU views
  24. *
  25. * In addition, this driver also handles FIQs, as these are routed to the same
  26. * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
  27. * performance counters (TODO).
  28. *
  29. * Implementation notes:
  30. *
  31. * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
  32. * and one for IPIs.
  33. * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
  34. * and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
  35. * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
  36. * - DT bindings use 3-cell form (like GIC):
  37. * - <0 nr flags> - hwirq #nr
  38. * - <1 nr flags> - FIQ #nr
  39. * - nr=0 Physical HV timer
  40. * - nr=1 Virtual HV timer
  41. * - nr=2 Physical guest timer
  42. * - nr=3 Virtual guest timer
  43. */
  44. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  45. #include <linux/bits.h>
  46. #include <linux/bitfield.h>
  47. #include <linux/cpuhotplug.h>
  48. #include <linux/io.h>
  49. #include <linux/irqchip.h>
  50. #include <linux/irqchip/arm-vgic-info.h>
  51. #include <linux/irqdomain.h>
  52. #include <linux/jump_label.h>
  53. #include <linux/limits.h>
  54. #include <linux/of_address.h>
  55. #include <linux/slab.h>
  56. #include <asm/apple_m1_pmu.h>
  57. #include <asm/cputype.h>
  58. #include <asm/exception.h>
  59. #include <asm/sysreg.h>
  60. #include <asm/virt.h>
  61. #include <dt-bindings/interrupt-controller/apple-aic.h>
  62. /*
  63. * AIC v1 registers (MMIO)
  64. */
  65. #define AIC_INFO 0x0004
  66. #define AIC_INFO_NR_IRQ GENMASK(15, 0)
  67. #define AIC_CONFIG 0x0010
  68. #define AIC_WHOAMI 0x2000
  69. #define AIC_EVENT 0x2004
  70. #define AIC_EVENT_DIE GENMASK(31, 24)
  71. #define AIC_EVENT_TYPE GENMASK(23, 16)
  72. #define AIC_EVENT_NUM GENMASK(15, 0)
  73. #define AIC_EVENT_TYPE_FIQ 0 /* Software use */
  74. #define AIC_EVENT_TYPE_IRQ 1
  75. #define AIC_EVENT_TYPE_IPI 4
  76. #define AIC_EVENT_IPI_OTHER 1
  77. #define AIC_EVENT_IPI_SELF 2
  78. #define AIC_IPI_SEND 0x2008
  79. #define AIC_IPI_ACK 0x200c
  80. #define AIC_IPI_MASK_SET 0x2024
  81. #define AIC_IPI_MASK_CLR 0x2028
  82. #define AIC_IPI_SEND_CPU(cpu) BIT(cpu)
  83. #define AIC_IPI_OTHER BIT(0)
  84. #define AIC_IPI_SELF BIT(31)
  85. #define AIC_TARGET_CPU 0x3000
  86. #define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7))
  87. #define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7))
  88. #define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
  89. #define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
  90. #define AIC_MAX_IRQ 0x400
  91. /*
  92. * AIC v2 registers (MMIO)
  93. */
  94. #define AIC2_VERSION 0x0000
  95. #define AIC2_VERSION_VER GENMASK(7, 0)
  96. #define AIC2_INFO1 0x0004
  97. #define AIC2_INFO1_NR_IRQ GENMASK(15, 0)
  98. #define AIC2_INFO1_LAST_DIE GENMASK(27, 24)
  99. #define AIC2_INFO2 0x0008
  100. #define AIC2_INFO3 0x000c
  101. #define AIC2_INFO3_MAX_IRQ GENMASK(15, 0)
  102. #define AIC2_INFO3_MAX_DIE GENMASK(27, 24)
  103. #define AIC2_RESET 0x0010
  104. #define AIC2_RESET_RESET BIT(0)
  105. #define AIC2_CONFIG 0x0014
  106. #define AIC2_CONFIG_ENABLE BIT(0)
  107. #define AIC2_CONFIG_PREFER_PCPU BIT(28)
  108. #define AIC2_TIMEOUT 0x0028
  109. #define AIC2_CLUSTER_PRIO 0x0030
  110. #define AIC2_DELAY_GROUPS 0x0100
  111. #define AIC2_IRQ_CFG 0x2000
  112. /*
  113. * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
  114. *
  115. * Repeat for each die:
  116. * IRQ_CFG: u32 * MAX_IRQS
  117. * SW_SET: u32 * (MAX_IRQS / 32)
  118. * SW_CLR: u32 * (MAX_IRQS / 32)
  119. * MASK_SET: u32 * (MAX_IRQS / 32)
  120. * MASK_CLR: u32 * (MAX_IRQS / 32)
  121. * HW_STATE: u32 * (MAX_IRQS / 32)
  122. *
  123. * This is followed by a set of event registers, each 16K page aligned.
  124. * The first one is the AP event register we will use. Unfortunately,
  125. * the actual implemented die count is not specified anywhere in the
  126. * capability registers, so we have to explicitly specify the event
  127. * register as a second reg entry in the device tree to remain
  128. * forward-compatible.
  129. */
  130. #define AIC2_IRQ_CFG_TARGET GENMASK(3, 0)
  131. #define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5)
  132. #define MASK_REG(x) (4 * ((x) >> 5))
  133. #define MASK_BIT(x) BIT((x) & GENMASK(4, 0))
  134. /*
  135. * IMP-DEF sysregs that control FIQ sources
  136. */
  137. /* IPI request registers */
  138. #define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0)
  139. #define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1)
  140. #define IPI_RR_CPU GENMASK(7, 0)
  141. /* Cluster only used for the GLOBAL register */
  142. #define IPI_RR_CLUSTER GENMASK(23, 16)
  143. #define IPI_RR_TYPE GENMASK(29, 28)
  144. #define IPI_RR_IMMEDIATE 0
  145. #define IPI_RR_RETRACT 1
  146. #define IPI_RR_DEFERRED 2
  147. #define IPI_RR_NOWAKE 3
  148. /* IPI status register */
  149. #define SYS_IMP_APL_IPI_SR_EL1 sys_reg(3, 5, 15, 1, 1)
  150. #define IPI_SR_PENDING BIT(0)
  151. /* Guest timer FIQ enable register */
  152. #define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2 sys_reg(3, 5, 15, 1, 3)
  153. #define VM_TMR_FIQ_ENABLE_V BIT(0)
  154. #define VM_TMR_FIQ_ENABLE_P BIT(1)
  155. /* Deferred IPI countdown register */
  156. #define SYS_IMP_APL_IPI_CR_EL1 sys_reg(3, 5, 15, 3, 1)
  157. /* Uncore PMC control register */
  158. #define SYS_IMP_APL_UPMCR0_EL1 sys_reg(3, 7, 15, 0, 4)
  159. #define UPMCR0_IMODE GENMASK(18, 16)
  160. #define UPMCR0_IMODE_OFF 0
  161. #define UPMCR0_IMODE_AIC 2
  162. #define UPMCR0_IMODE_HALT 3
  163. #define UPMCR0_IMODE_FIQ 4
  164. /* Uncore PMC status register */
  165. #define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
  166. #define UPMSR_IACT BIT(0)
  167. /* MPIDR fields */
  168. #define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0)
  169. #define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1)
  170. #define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \
  171. FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
  172. FIELD_PREP(AIC_EVENT_NUM, irq))
  173. #define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
  174. FIELD_PREP(AIC_EVENT_NUM, x))
  175. #define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x)
  176. #define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x)
  177. #define AIC_NR_SWIPI 32
  178. /*
  179. * FIQ hwirq index definitions: FIQ sources use the DT binding defines
  180. * directly, except that timers are special. At the irqchip level, the
  181. * two timer types are represented by their access method: _EL0 registers
  182. * or _EL02 registers. In the DT binding, the timers are represented
  183. * by their purpose (HV or guest). This mapping is for when the kernel is
  184. * running at EL2 (with VHE). When the kernel is running at EL1, the
  185. * mapping differs and aic_irq_domain_translate() performs the remapping.
  186. */
  187. enum fiq_hwirq {
  188. /* Must be ordered as in apple-aic.h */
  189. AIC_TMR_EL0_PHYS = AIC_TMR_HV_PHYS,
  190. AIC_TMR_EL0_VIRT = AIC_TMR_HV_VIRT,
  191. AIC_TMR_EL02_PHYS = AIC_TMR_GUEST_PHYS,
  192. AIC_TMR_EL02_VIRT = AIC_TMR_GUEST_VIRT,
  193. AIC_CPU_PMU_Effi = AIC_CPU_PMU_E,
  194. AIC_CPU_PMU_Perf = AIC_CPU_PMU_P,
  195. /* No need for this to be discovered from DT */
  196. AIC_VGIC_MI,
  197. AIC_NR_FIQ
  198. };
  199. /* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */
  200. static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
  201. /* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */
  202. static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi);
  203. struct aic_info {
  204. int version;
  205. /* Register offsets */
  206. u32 event;
  207. u32 target_cpu;
  208. u32 irq_cfg;
  209. u32 sw_set;
  210. u32 sw_clr;
  211. u32 mask_set;
  212. u32 mask_clr;
  213. u32 die_stride;
  214. /* Features */
  215. bool fast_ipi;
  216. bool local_fast_ipi;
  217. };
  218. static const struct aic_info aic1_info __initconst = {
  219. .version = 1,
  220. .event = AIC_EVENT,
  221. .target_cpu = AIC_TARGET_CPU,
  222. };
  223. static const struct aic_info aic1_fipi_info __initconst = {
  224. .version = 1,
  225. .event = AIC_EVENT,
  226. .target_cpu = AIC_TARGET_CPU,
  227. .fast_ipi = true,
  228. };
  229. static const struct aic_info aic1_local_fipi_info __initconst = {
  230. .version = 1,
  231. .event = AIC_EVENT,
  232. .target_cpu = AIC_TARGET_CPU,
  233. .fast_ipi = true,
  234. .local_fast_ipi = true,
  235. };
  236. static const struct aic_info aic2_info __initconst = {
  237. .version = 2,
  238. .irq_cfg = AIC2_IRQ_CFG,
  239. .fast_ipi = true,
  240. .local_fast_ipi = true,
  241. };
  242. static const struct of_device_id aic_info_match[] = {
  243. {
  244. .compatible = "apple,t8103-aic",
  245. .data = &aic1_local_fipi_info,
  246. },
  247. {
  248. .compatible = "apple,t8015-aic",
  249. .data = &aic1_fipi_info,
  250. },
  251. {
  252. .compatible = "apple,aic",
  253. .data = &aic1_info,
  254. },
  255. {
  256. .compatible = "apple,aic2",
  257. .data = &aic2_info,
  258. },
  259. {}
  260. };
  261. struct aic_irq_chip {
  262. void __iomem *base;
  263. void __iomem *event;
  264. struct irq_domain *hw_domain;
  265. struct {
  266. cpumask_t aff;
  267. } *fiq_aff[AIC_NR_FIQ];
  268. int nr_irq;
  269. int max_irq;
  270. int nr_die;
  271. int max_die;
  272. struct aic_info info;
  273. };
  274. static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
  275. static struct aic_irq_chip *aic_irqc;
  276. static void aic_handle_ipi(struct pt_regs *regs);
  277. static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
  278. {
  279. return readl_relaxed(ic->base + reg);
  280. }
  281. static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
  282. {
  283. writel_relaxed(val, ic->base + reg);
  284. }
  285. /*
  286. * IRQ irqchip
  287. */
  288. static void aic_irq_mask(struct irq_data *d)
  289. {
  290. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  291. struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
  292. u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
  293. u32 irq = AIC_HWIRQ_IRQ(hwirq);
  294. aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
  295. }
  296. static void aic_irq_unmask(struct irq_data *d)
  297. {
  298. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  299. struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
  300. u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
  301. u32 irq = AIC_HWIRQ_IRQ(hwirq);
  302. aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
  303. }
  304. static void aic_irq_eoi(struct irq_data *d)
  305. {
  306. /*
  307. * Reading the interrupt reason automatically acknowledges and masks
  308. * the IRQ, so we just unmask it here if needed.
  309. */
  310. if (!irqd_irq_masked(d))
  311. aic_irq_unmask(d);
  312. }
  313. static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
  314. {
  315. struct aic_irq_chip *ic = aic_irqc;
  316. u32 event, type, irq;
  317. do {
  318. /*
  319. * We cannot use a relaxed read here, as reads from DMA buffers
  320. * need to be ordered after the IRQ fires.
  321. */
  322. event = readl(ic->event + ic->info.event);
  323. type = FIELD_GET(AIC_EVENT_TYPE, event);
  324. irq = FIELD_GET(AIC_EVENT_NUM, event);
  325. if (type == AIC_EVENT_TYPE_IRQ)
  326. generic_handle_domain_irq(aic_irqc->hw_domain, event);
  327. else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
  328. aic_handle_ipi(regs);
  329. else if (event != 0)
  330. pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
  331. } while (event);
  332. /*
  333. * vGIC maintenance interrupts end up here too, so we need to check
  334. * for them separately. It should however only trigger when NV is
  335. * in use, and be cleared when coming back from the handler.
  336. */
  337. if (is_kernel_in_hyp_mode() &&
  338. (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
  339. read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
  340. generic_handle_domain_irq(aic_irqc->hw_domain,
  341. AIC_FIQ_HWIRQ(AIC_VGIC_MI));
  342. if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
  343. read_sysreg_s(SYS_ICH_MISR_EL2))) {
  344. pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
  345. sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
  346. }
  347. }
  348. }
  349. static int aic_irq_set_affinity(struct irq_data *d,
  350. const struct cpumask *mask_val, bool force)
  351. {
  352. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  353. struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
  354. int cpu;
  355. BUG_ON(!ic->info.target_cpu);
  356. if (force)
  357. cpu = cpumask_first(mask_val);
  358. else
  359. cpu = cpumask_any_and(mask_val, cpu_online_mask);
  360. aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
  361. irq_data_update_effective_affinity(d, cpumask_of(cpu));
  362. return IRQ_SET_MASK_OK;
  363. }
  364. static int aic_irq_set_type(struct irq_data *d, unsigned int type)
  365. {
  366. /*
  367. * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
  368. * have a way to find out the type of any given IRQ, so just allow both.
  369. */
  370. return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
  371. }
  372. static struct irq_chip aic_chip = {
  373. .name = "AIC",
  374. .irq_mask = aic_irq_mask,
  375. .irq_unmask = aic_irq_unmask,
  376. .irq_eoi = aic_irq_eoi,
  377. .irq_set_affinity = aic_irq_set_affinity,
  378. .irq_set_type = aic_irq_set_type,
  379. };
  380. static struct irq_chip aic2_chip = {
  381. .name = "AIC2",
  382. .irq_mask = aic_irq_mask,
  383. .irq_unmask = aic_irq_unmask,
  384. .irq_eoi = aic_irq_eoi,
  385. .irq_set_type = aic_irq_set_type,
  386. };
  387. /*
  388. * FIQ irqchip
  389. */
  390. static unsigned long aic_fiq_get_idx(struct irq_data *d)
  391. {
  392. return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
  393. }
  394. static void aic_fiq_set_mask(struct irq_data *d)
  395. {
  396. /* Only the guest timers have real mask bits, unfortunately. */
  397. switch (aic_fiq_get_idx(d)) {
  398. case AIC_TMR_EL02_PHYS:
  399. sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
  400. isb();
  401. break;
  402. case AIC_TMR_EL02_VIRT:
  403. sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
  404. isb();
  405. break;
  406. default:
  407. break;
  408. }
  409. }
  410. static void aic_fiq_clear_mask(struct irq_data *d)
  411. {
  412. switch (aic_fiq_get_idx(d)) {
  413. case AIC_TMR_EL02_PHYS:
  414. sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
  415. isb();
  416. break;
  417. case AIC_TMR_EL02_VIRT:
  418. sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
  419. isb();
  420. break;
  421. default:
  422. break;
  423. }
  424. }
  425. static void aic_fiq_mask(struct irq_data *d)
  426. {
  427. aic_fiq_set_mask(d);
  428. __this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
  429. }
  430. static void aic_fiq_unmask(struct irq_data *d)
  431. {
  432. aic_fiq_clear_mask(d);
  433. __this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
  434. }
  435. static void aic_fiq_eoi(struct irq_data *d)
  436. {
  437. /* We mask to ack (where we can), so we need to unmask at EOI. */
  438. if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
  439. aic_fiq_clear_mask(d);
  440. }
  441. #define TIMER_FIRING(x) \
  442. (((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK | \
  443. ARCH_TIMER_CTRL_IT_STAT)) == \
  444. (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
  445. static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
  446. {
  447. /*
  448. * It would be really nice if we had a system register that lets us get
  449. * the FIQ source state without having to peek down into sources...
  450. * but such a register does not seem to exist.
  451. *
  452. * So, we have these potential sources to test for:
  453. * - Fast IPIs (not yet used)
  454. * - The 4 timers (CNTP, CNTV for each of HV and guest)
  455. * - Per-core PMCs (not yet supported)
  456. * - Per-cluster uncore PMCs (not yet supported)
  457. *
  458. * Since not dealing with any of these results in a FIQ storm,
  459. * we check for everything here, even things we don't support yet.
  460. */
  461. if (static_branch_likely(&use_fast_ipi) &&
  462. (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING))
  463. aic_handle_ipi(regs);
  464. if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
  465. generic_handle_domain_irq(aic_irqc->hw_domain,
  466. AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));
  467. if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
  468. generic_handle_domain_irq(aic_irqc->hw_domain,
  469. AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));
  470. if (is_kernel_in_hyp_mode()) {
  471. uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);
  472. if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
  473. TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
  474. generic_handle_domain_irq(aic_irqc->hw_domain,
  475. AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));
  476. if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
  477. TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
  478. generic_handle_domain_irq(aic_irqc->hw_domain,
  479. AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
  480. }
  481. if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
  482. (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
  483. int irq;
  484. if (cpumask_test_cpu(smp_processor_id(),
  485. &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
  486. irq = AIC_CPU_PMU_P;
  487. else
  488. irq = AIC_CPU_PMU_E;
  489. generic_handle_domain_irq(aic_irqc->hw_domain,
  490. AIC_FIQ_HWIRQ(irq));
  491. }
  492. if (static_branch_likely(&use_fast_ipi) &&
  493. (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
  494. (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
  495. /* Same story with uncore PMCs */
  496. pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
  497. sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
  498. FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
  499. }
  500. }
  501. static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
  502. {
  503. return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
  504. }
  505. static struct irq_chip fiq_chip = {
  506. .name = "AIC-FIQ",
  507. .irq_mask = aic_fiq_mask,
  508. .irq_unmask = aic_fiq_unmask,
  509. .irq_ack = aic_fiq_set_mask,
  510. .irq_eoi = aic_fiq_eoi,
  511. .irq_set_type = aic_fiq_set_type,
  512. };
  513. /*
  514. * Main IRQ domain
  515. */
  516. static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
  517. irq_hw_number_t hw)
  518. {
  519. struct aic_irq_chip *ic = id->host_data;
  520. u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
  521. struct irq_chip *chip = &aic_chip;
  522. if (ic->info.version == 2)
  523. chip = &aic2_chip;
  524. if (type == AIC_EVENT_TYPE_IRQ) {
  525. irq_domain_set_info(id, irq, hw, chip, id->host_data,
  526. handle_fasteoi_irq, NULL, NULL);
  527. irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
  528. } else {
  529. int fiq = FIELD_GET(AIC_EVENT_NUM, hw);
  530. switch (fiq) {
  531. case AIC_CPU_PMU_P:
  532. case AIC_CPU_PMU_E:
  533. irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
  534. break;
  535. default:
  536. irq_set_percpu_devid(irq);
  537. break;
  538. }
  539. irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
  540. handle_percpu_devid_irq, NULL, NULL);
  541. }
  542. return 0;
  543. }
  544. static int aic_irq_domain_translate(struct irq_domain *id,
  545. struct irq_fwspec *fwspec,
  546. unsigned long *hwirq,
  547. unsigned int *type)
  548. {
  549. struct aic_irq_chip *ic = id->host_data;
  550. u32 *args;
  551. u32 die = 0;
  552. if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
  553. !is_of_node(fwspec->fwnode))
  554. return -EINVAL;
  555. args = &fwspec->param[1];
  556. if (fwspec->param_count == 4) {
  557. die = args[0];
  558. args++;
  559. }
  560. switch (fwspec->param[0]) {
  561. case AIC_IRQ:
  562. if (die >= ic->nr_die)
  563. return -EINVAL;
  564. if (args[0] >= ic->nr_irq)
  565. return -EINVAL;
  566. *hwirq = AIC_IRQ_HWIRQ(die, args[0]);
  567. break;
  568. case AIC_FIQ:
  569. if (die != 0)
  570. return -EINVAL;
  571. if (args[0] >= AIC_NR_FIQ)
  572. return -EINVAL;
  573. *hwirq = AIC_FIQ_HWIRQ(args[0]);
  574. /*
  575. * In EL1 the non-redirected registers are the guest's,
  576. * not EL2's, so remap the hwirqs to match.
  577. */
  578. if (!is_kernel_in_hyp_mode()) {
  579. switch (args[0]) {
  580. case AIC_TMR_GUEST_PHYS:
  581. *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
  582. break;
  583. case AIC_TMR_GUEST_VIRT:
  584. *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
  585. break;
  586. case AIC_TMR_HV_PHYS:
  587. case AIC_TMR_HV_VIRT:
  588. return -ENOENT;
  589. default:
  590. break;
  591. }
  592. }
  593. break;
  594. default:
  595. return -EINVAL;
  596. }
  597. *type = args[1] & IRQ_TYPE_SENSE_MASK;
  598. return 0;
  599. }
  600. static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  601. unsigned int nr_irqs, void *arg)
  602. {
  603. unsigned int type = IRQ_TYPE_NONE;
  604. struct irq_fwspec *fwspec = arg;
  605. irq_hw_number_t hwirq;
  606. int i, ret;
  607. ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
  608. if (ret)
  609. return ret;
  610. for (i = 0; i < nr_irqs; i++) {
  611. ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
  612. if (ret)
  613. return ret;
  614. }
  615. return 0;
  616. }
  617. static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
  618. unsigned int nr_irqs)
  619. {
  620. int i;
  621. for (i = 0; i < nr_irqs; i++) {
  622. struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
  623. irq_set_handler(virq + i, NULL);
  624. irq_domain_reset_irq_data(d);
  625. }
  626. }
  627. static const struct irq_domain_ops aic_irq_domain_ops = {
  628. .translate = aic_irq_domain_translate,
  629. .alloc = aic_irq_domain_alloc,
  630. .free = aic_irq_domain_free,
  631. };
  632. /*
  633. * IPI irqchip
  634. */
  635. static void aic_ipi_send_fast(int cpu)
  636. {
  637. u64 mpidr = cpu_logical_map(cpu);
  638. u64 my_mpidr = read_cpuid_mpidr();
  639. u64 cluster = MPIDR_CLUSTER(mpidr);
  640. u64 idx = MPIDR_CPU(mpidr);
  641. if (static_branch_likely(&use_local_fast_ipi) && MPIDR_CLUSTER(my_mpidr) == cluster) {
  642. write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), SYS_IMP_APL_IPI_RR_LOCAL_EL1);
  643. } else {
  644. write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
  645. SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
  646. }
  647. isb();
  648. }
  649. static void aic_handle_ipi(struct pt_regs *regs)
  650. {
  651. /*
  652. * Ack the IPI. We need to order this after the AIC event read, but
  653. * that is enforced by normal MMIO ordering guarantees.
  654. *
  655. * For the Fast IPI case, this needs to be ordered before the vIPI
  656. * handling below, so we need to isb();
  657. */
  658. if (static_branch_likely(&use_fast_ipi)) {
  659. write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
  660. isb();
  661. } else {
  662. aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
  663. }
  664. ipi_mux_process();
  665. /*
  666. * No ordering needed here; at worst this just changes the timing of
  667. * when the next IPI will be delivered.
  668. */
  669. if (!static_branch_likely(&use_fast_ipi))
  670. aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
  671. }
  672. static void aic_ipi_send_single(unsigned int cpu)
  673. {
  674. if (static_branch_likely(&use_fast_ipi))
  675. aic_ipi_send_fast(cpu);
  676. else
  677. aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
  678. }
  679. static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
  680. {
  681. int base_ipi;
  682. base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
  683. if (WARN_ON(base_ipi <= 0))
  684. return -ENODEV;
  685. set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
  686. return 0;
  687. }
  688. static int aic_init_cpu(unsigned int cpu)
  689. {
  690. /* Mask all hard-wired per-CPU IRQ/FIQ sources */
  691. /* Pending Fast IPI FIQs */
  692. if (static_branch_likely(&use_fast_ipi))
  693. write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
  694. /* Timer FIQs */
  695. sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
  696. sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
  697. /* EL2-only (VHE mode) IRQ sources */
  698. if (is_kernel_in_hyp_mode()) {
  699. /* Guest timers */
  700. sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
  701. VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);
  702. /* vGIC maintenance IRQ */
  703. sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
  704. }
  705. /* PMC FIQ */
  706. sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
  707. FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
  708. /* Uncore PMC FIQ */
  709. if (static_branch_likely(&use_fast_ipi)) {
  710. sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
  711. FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
  712. }
  713. /* Commit all of the above */
  714. isb();
  715. if (aic_irqc->info.version == 1) {
  716. /*
  717. * Make sure the kernel's idea of logical CPU order is the same as AIC's
  718. * If we ever end up with a mismatch here, we will have to introduce
  719. * a mapping table similar to what other irqchip drivers do.
  720. */
  721. WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
  722. /*
  723. * Always keep IPIs unmasked at the hardware level (except auto-masking
  724. * by AIC during processing). We manage masks at the vIPI level.
  725. * These registers only exist on AICv1, AICv2 always uses fast IPIs.
  726. */
  727. aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
  728. if (static_branch_likely(&use_fast_ipi)) {
  729. aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
  730. } else {
  731. aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
  732. aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
  733. }
  734. }
  735. /* Initialize the local mask state */
  736. __this_cpu_write(aic_fiq_unmasked, 0);
  737. return 0;
  738. }
  739. static struct gic_kvm_info vgic_info __initdata = {
  740. .type = GIC_V3,
  741. .no_maint_irq_mask = true,
  742. .no_hw_deactivation = true,
  743. };
  744. static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
  745. {
  746. int i, n;
  747. u32 fiq;
  748. if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
  749. WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
  750. return;
  751. n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
  752. if (WARN_ON(n < 0))
  753. return;
  754. ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
  755. if (!ic->fiq_aff[fiq])
  756. return;
  757. for (i = 0; i < n; i++) {
  758. struct device_node *cpu_node;
  759. u32 cpu_phandle;
  760. int cpu;
  761. if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
  762. continue;
  763. cpu_node = of_find_node_by_phandle(cpu_phandle);
  764. if (WARN_ON(!cpu_node))
  765. continue;
  766. cpu = of_cpu_node_to_id(cpu_node);
  767. of_node_put(cpu_node);
  768. if (WARN_ON(cpu < 0))
  769. continue;
  770. cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
  771. }
  772. }
/*
 * Probe and initialize the AIC from its device-tree node.
 *
 * Maps the MMIO region(s), discovers the controller geometry (IRQ and die
 * counts) from the hardware info registers, computes the per-die register
 * block layout, creates the IRQ domain, sets up SMP/IPI handling, masks
 * and clears all hardware IRQs, and registers the top-level IRQ/FIQ entry
 * points and vGIC info for KVM.
 *
 * Returns 0 on success, -EIO if the registers cannot be mapped, -ENOMEM
 * on allocation failure, and -ENODEV on any later probe failure.
 */
static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i, die;
	u32 off, start_off;
	void __iomem *regs;
	struct aic_irq_chip *irqc;
	struct device_node *affs;
	const struct of_device_id *match;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc) {
		iounmap(regs);
		return -ENOMEM;
	}

	irqc->base = regs;

	match = of_match_node(aic_info_match, node);
	if (!match)
		goto err_unmap;

	/* Copy the per-compatible info so the offsets below can be filled in */
	irqc->info = *(struct aic_info *)match->data;

	aic_irqc = irqc;

	switch (irqc->info.version) {
	case 1: {
		u32 info;

		info = aic_ic_read(irqc, AIC_INFO);
		irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
		irqc->max_irq = AIC_MAX_IRQ;
		irqc->nr_die = irqc->max_die = 1;

		/* Register layout accounting starts at the TARGET_CPU array */
		off = start_off = irqc->info.target_cpu;
		off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */

		/* AICv1 has a single MMIO region; events share the base mapping */
		irqc->event = irqc->base;

		break;
	}
	case 2: {
		u32 info1, info3;

		info1 = aic_ic_read(irqc, AIC2_INFO1);
		info3 = aic_ic_read(irqc, AIC2_INFO3);

		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
		irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
		irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);

		/* On AICv2 the layout starts at the IRQ_CFG array instead */
		off = start_off = irqc->info.irq_cfg;
		off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */

		/* AICv2 exposes the event register in a separate MMIO region */
		irqc->event = of_iomap(node, 1);
		if (WARN_ON(!irqc->event))
			goto err_unmap;

		break;
	}
	}

	/*
	 * Derive the offsets of the remaining per-die register arrays; each
	 * holds one bit per IRQ, hence max_irq >> 5 u32 words per array.
	 */
	irqc->info.sw_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
	irqc->info.sw_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
	irqc->info.mask_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
	irqc->info.mask_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
	off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */

	if (!irqc->info.fast_ipi)
		static_branch_disable(&use_fast_ipi);

	if (!irqc->info.local_fast_ipi)
		static_branch_disable(&use_local_fast_ipi);

	/* Total span of one die's register block, used to step between dies */
	irqc->info.die_stride = off - start_off;

	irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
						 &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain))
		goto err_unmap;

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node))
		goto err_remove_domain;

	/* Optional per-FIQ CPU affinity description in the DT */
	affs = of_get_child_by_name(node, "affinities");
	if (affs) {
		struct device_node *chld;

		for_each_child_of_node(affs, chld)
			build_fiq_affinity(irqc, chld);
	}
	of_node_put(affs); /* of_node_put(NULL) is a no-op */

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	/*
	 * Bring the hardware to a known state: mask every IRQ, clear all
	 * pending soft state and (on v1) route everything to CPU 0, for
	 * each die in turn.
	 */
	off = 0;
	for (die = 0; die < irqc->nr_die; die++) {
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
		if (irqc->info.target_cpu)
			for (i = 0; i < irqc->nr_irq; i++)
				aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
		off += irqc->info.die_stride;
	}

	if (irqc->info.version == 2) {
		u32 config = aic_ic_read(irqc, AIC2_CONFIG);

		config |= AIC2_CONFIG_ENABLE;
		aic_ic_write(irqc, AIC2_CONFIG, config);
	}

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts");

	if (static_branch_likely(&use_fast_ipi))
		pr_info("Using Fast IPIs");

	/* Per-CPU init runs on each CPU as it comes online (and on boot CPU now) */
	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	if (is_kernel_in_hyp_mode()) {
		/* Map the vGIC maintenance interrupt through our own domain */
		struct irq_fwspec mi = {
			.fwnode = of_node_to_fwnode(node),
			.param_count = 3,
			.param = {
				[0] = AIC_FIQ, /* This is a lie */
				[1] = AIC_VGIC_MI,
				[2] = IRQ_TYPE_LEVEL_HIGH,
			},
		};

		vgic_info.maint_irq = irq_create_fwspec_mapping(&mi);
		WARN_ON(!vgic_info.maint_irq);
	}

	vgic_set_kvm_info(&vgic_info);

	pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
		irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;

err_remove_domain:
	irq_domain_remove(irqc->hw_domain);
err_unmap:
	/* On v1 event aliases base; only unmap it when it is a second mapping */
	if (irqc->event && irqc->event != irqc->base)
		iounmap(irqc->event);
	iounmap(irqc->base);
	kfree(irqc);
	return -ENODEV;
}
/* Both AIC generations probe through the same init path; the version is
 * selected via the matched compatible's data (see aic_info_match). */
IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);