irq-gic.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2002 ARM Limited, All Rights Reserved.
  4. *
  5. * Interrupt architecture for the GIC:
  6. *
  7. * o There is one Interrupt Distributor, which receives interrupts
  8. * from system devices and sends them to the Interrupt Controllers.
  9. *
  10. * o There is one CPU Interface per CPU, which sends interrupts sent
  11. * by the Distributor, and interrupts generated locally, to the
  12. * associated CPU. The base address of the CPU interface is usually
  13. * aliased so that the same address points to different chips depending
  14. * on the CPU it is accessed from.
  15. *
  16. * Note that IRQs 0-31 are special - they are local to each CPU.
  17. * As such, the enable set/clear, pending set/clear and active bit
  18. * registers are banked per-cpu for these sources.
  19. */
  20. #include <linux/init.h>
  21. #include <linux/kernel.h>
  22. #include <linux/kstrtox.h>
  23. #include <linux/err.h>
  24. #include <linux/module.h>
  25. #include <linux/list.h>
  26. #include <linux/smp.h>
  27. #include <linux/cpu.h>
  28. #include <linux/cpu_pm.h>
  29. #include <linux/cpumask.h>
  30. #include <linux/io.h>
  31. #include <linux/of.h>
  32. #include <linux/of_address.h>
  33. #include <linux/of_irq.h>
  34. #include <linux/acpi.h>
  35. #include <linux/irqdomain.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/percpu.h>
  38. #include <linux/seq_file.h>
  39. #include <linux/slab.h>
  40. #include <linux/irqchip.h>
  41. #include <linux/irqchip/chained_irq.h>
  42. #include <linux/irqchip/arm-gic.h>
  43. #include <asm/cputype.h>
  44. #include <asm/irq.h>
  45. #include <asm/exception.h>
  46. #include <asm/smp_plat.h>
  47. #include <asm/virt.h>
  48. #include "irq-gic-common.h"
#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

/*
 * Warn once (tainting the kernel) if this CPU exposes the GICv3
 * system-register CPU interface: running this GICv2 driver on such a
 * CPU means the firmware left the sysreg interface enabled.
 */
static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_GIC_CPUIF_SYSREGS),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features()	do { } while(0)
#endif
/*
 * Distributor/CPU interface base address: either a single mapping
 * shared by all CPUs, or (for non-banked GIC integrations, see
 * CONFIG_GIC_NON_BANKED below) one mapping per CPU.
 */
union gic_base {
	void __iomem *common_base;
	void __iomem * __percpu *percpu_base;
};

/* Per-GIC-instance driver state. */
struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
	/* Raw mappings as handed to gic_teardown()/error paths. */
	void __iomem *raw_dist_base;
	void __iomem *raw_cpu_base;
	u32 percpu_offset;
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
	/*
	 * Suspend/resume save areas. SPI state lives here (sized for the
	 * architectural maximum of 1020 interrupts: 1 enable/active bit,
	 * 2 config bits and 8 target bits per interrupt); the per-cpu
	 * banked PPI/SGI state is allocated in gic_pm_init().
	 */
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
};
#ifdef CONFIG_BL_SWITCHER
/*
 * With the big.LITTLE switcher, gic_cpu_map[] can be rewritten at
 * runtime (gic_migrate_target), so accesses must be serialized.
 * Without it the map is stable after boot and no locking is needed.
 */
static DEFINE_RAW_SPINLOCK(cpu_map_lock);

#define gic_lock_irqsave(f)		\
	raw_spin_lock_irqsave(&cpu_map_lock, (f))
#define gic_unlock_irqrestore(f)	\
	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))

#define gic_lock()			raw_spin_lock(&cpu_map_lock)
#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)

#else

#define gic_lock_irqsave(f)		do { (void)(f); } while(0)
#define gic_unlock_irqrestore(f)	do { (void)(f); } while(0)

#define gic_lock()			do { } while(0)
#define gic_unlock()			do { } while(0)

#endif
/*
 * Set when byte writes to GICD_ITARGETSRn don't work and a locked
 * 32-bit read-modify-write must be used instead (see rmw_writeb()).
 */
static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

/* When true, EOImodeNS is programmed and EOI/deactivate are split. */
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

static struct gic_kvm_info gic_v2_kvm_info __initdata;

/*
 * Per-cpu stash of the full GICC_IAR value of an in-progress SGI, so
 * the EOI/deactivate write can echo back the source-CPU field.
 */
static DEFINE_PER_CPU(u32, sgi_intid);
#ifdef CONFIG_GIC_NON_BANKED
static DEFINE_STATIC_KEY_FALSE(frankengic_key);

/* Switch base-address lookups to the per-cpu variant (non-banked GIC). */
static void enable_frankengic(void)
{
	static_branch_enable(&frankengic_key);
}

/*
 * Resolve a union gic_base: this CPU's private mapping when the
 * "frankengic" key is enabled, the shared mapping otherwise.
 */
static inline void __iomem *__get_base(union gic_base *base)
{
	if (static_branch_unlikely(&frankengic_key))
		return raw_cpu_read(*base->percpu_base);

	return base->common_base;
}

#define gic_data_dist_base(d)	__get_base(&(d)->dist_base)
#define gic_data_cpu_base(d)	__get_base(&(d)->cpu_base)
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define enable_frankengic()	do { } while(0)
#endif
  127. static inline void __iomem *gic_dist_base(struct irq_data *d)
  128. {
  129. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  130. return gic_data_dist_base(gic_data);
  131. }
  132. static inline void __iomem *gic_cpu_base(struct irq_data *d)
  133. {
  134. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  135. return gic_data_cpu_base(gic_data);
  136. }
  137. static inline bool cascading_gic_irq(struct irq_data *d)
  138. {
  139. void *data = irq_data_get_irq_handler_data(d);
  140. /*
  141. * If handler_data is set, this is a cascading interrupt, and
  142. * it cannot possibly be forwarded.
  143. */
  144. return data != NULL;
  145. }
  146. /*
  147. * Routines to acknowledge, disable and enable interrupts
  148. */
  149. static void gic_poke_irq(struct irq_data *d, u32 offset)
  150. {
  151. u32 mask = 1 << (irqd_to_hwirq(d) % 32);
  152. writel_relaxed(mask, gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4);
  153. }
  154. static int gic_peek_irq(struct irq_data *d, u32 offset)
  155. {
  156. u32 mask = 1 << (irqd_to_hwirq(d) % 32);
  157. return !!(readl_relaxed(gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4) & mask);
  158. }
/* Mask by setting the interrupt's bit in GICD_ICENABLERn. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}
/* EOImode==1 variant of mask: also deactivate vcpu-forwarded interrupts. */
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * noone to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}
/* Unmask by setting the interrupt's bit in GICD_ISENABLERn. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}
/* EOImode==0 completion: single write drops priority and deactivates. */
static void gic_eoi_irq(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	/*
	 * For SGIs (hwirq < 16) GICC_EOIR must be written with the exact
	 * value read from GICC_IAR, which encodes the source CPU; use the
	 * value stashed by gic_handle_irq().
	 */
	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
}
/*
 * EOImode==1 completion: deactivate only (the priority drop was already
 * done at ack time in gic_handle_irq()).
 */
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	/* SGIs must echo back the stashed GICC_IAR value (source CPU). */
	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}
  198. static int gic_irq_set_irqchip_state(struct irq_data *d,
  199. enum irqchip_irq_state which, bool val)
  200. {
  201. u32 reg;
  202. switch (which) {
  203. case IRQCHIP_STATE_PENDING:
  204. reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
  205. break;
  206. case IRQCHIP_STATE_ACTIVE:
  207. reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
  208. break;
  209. case IRQCHIP_STATE_MASKED:
  210. reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
  211. break;
  212. default:
  213. return -EINVAL;
  214. }
  215. gic_poke_irq(d, reg);
  216. return 0;
  217. }
  218. static int gic_irq_get_irqchip_state(struct irq_data *d,
  219. enum irqchip_irq_state which, bool *val)
  220. {
  221. switch (which) {
  222. case IRQCHIP_STATE_PENDING:
  223. *val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
  224. break;
  225. case IRQCHIP_STATE_ACTIVE:
  226. *val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
  227. break;
  228. case IRQCHIP_STATE_MASKED:
  229. *val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
  230. break;
  231. default:
  232. return -EINVAL;
  233. }
  234. return 0;
  235. }
/*
 * Program the trigger type of an interrupt. SGIs (hwirq 0-15) are fixed
 * edge-rising; SPIs (hwirq >= 32) accept only level-high or edge-rising.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	irq_hw_number_t gicirq = irqd_to_hwirq(d);
	void __iomem *base = gic_dist_base(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG);
	if (ret && gicirq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16);
		ret = 0;
	}

	return ret;
}
  256. static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
  257. {
  258. /* Only interrupts on the primary GIC can be forwarded to a vcpu. */
  259. if (cascading_gic_irq(d) || irqd_to_hwirq(d) < 16)
  260. return -EINVAL;
  261. if (vcpu)
  262. irqd_set_forwarded_to_vcpu(d);
  263. else
  264. irqd_clr_forwarded_to_vcpu(d);
  265. return 0;
  266. }
/*
 * Retrigger by re-setting the PENDING state; returns non-zero (handled)
 * when the state write succeeded.
 */
static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}
/*
 * Top-level interrupt entry: ack interrupts from the primary GIC's CPU
 * interface and dispatch them into its irqdomain until none are pending.
 */
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		/* INTIDs 1020-1023 are reserved/spurious: nothing pending. */
		if (unlikely(irqnr >= 1020))
			break;

		/* EOImode==1: drop priority now, deactivate later in irq_eoi. */
		if (static_branch_likely(&supports_deactivate_key))
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
		isb();

		/*
		 * Ensure any shared data written by the CPU sending the IPI
		 * is read after we've read the ACK register on the GIC.
		 *
		 * Pairs with the write barrier in gic_ipi_send_mask
		 */
		if (irqnr <= 15) {
			smp_rmb();

			/*
			 * The GIC encodes the source CPU in GICC_IAR,
			 * leading to the deactivation to fail if not
			 * written back as is to GICC_EOI. Stash the INTID
			 * away for gic_eoi_irq() to write back. This only
			 * works because we don't nest SGIs...
			 */
			this_cpu_write(sgi_intid, irqstat);
		}

		generic_handle_domain_irq(gic->domain, irqnr);
	} while (1);
}
/*
 * Chained handler for a secondary GIC cascaded behind an interrupt of
 * its parent: ack on the secondary, dispatch into its domain, then let
 * the chained_irq helpers complete the parent interrupt.
 */
static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int gic_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	isb();
	ret = generic_handle_domain_irq(chip_data->domain, gic_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);

 out:
	chained_irq_exit(chip, desc);
}
  323. static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
  324. {
  325. struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
  326. if (gic->domain->pm_dev)
  327. seq_printf(p, gic->domain->pm_dev->of_node->name);
  328. else
  329. seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
  330. }
/*
 * Hook secondary GIC @gic_nr behind parent interrupt @irq; the
 * handler_data doubles as the "this is a cascade" marker tested by
 * cascading_gic_irq().
 */
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}
/*
 * Discover this CPU's interface mask by scanning the banked
 * GICD_ITARGETSR registers of the first 32 interrupts for a non-zero
 * target byte (all four bytes of each word are folded together).
 */
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	/* A secure-only distributor reads as zero: affinity won't work. */
	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
/*
 * Identify a real GICv2 from the CPU interface identification register.
 * NOTE(review): mask/match appear to select the implementer (0x43B, ARM)
 * and architecture-version fields of GICC_IIDR — confirm against the
 * GICv2 architecture spec.
 */
static bool gic_check_gicv2(void __iomem *base)
{
	u32 val = readl_relaxed(base + GIC_CPU_IDENT);
	return (val & 0xff0fff) == 0x02043B;
}
/* Enable this CPU's interface, selecting EOImodeNS on the primary GIC. */
static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;
	int i;

	/* Split priority-drop/deactivate only applies to the primary GIC. */
	if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
		mode = GIC_CPU_CTRL_EOImodeNS;

	/* On a true GICv2, clear all four active-priority registers. */
	if (gic_check_gicv2(cpu_base))
		for (i = 0; i < 4; i++)
			writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}
/* One-time distributor setup: route and configure all SPIs, then enable. */
static void gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	/* Replicate the CPU mask byte into all four target bytes. */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	/* 4 target bytes per word and i steps by 4, so the byte offset is i. */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, GICD_INT_DEF_PRI);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
/*
 * Per-CPU bring-up: learn this CPU's interface mask (primary GIC only),
 * configure the banked PPI/SGI registers, and enable the CPU interface.
 * Returns 0, or -EINVAL if the CPU number exceeds the 8 GIC interfaces.
 */
static int gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
			return -EINVAL;

		gic_check_cpu_features();
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, 32, GICD_INT_DEF_PRI);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);

	return 0;
}
/*
 * Disable the CPU interface of GIC @gic_nr (read-modify-write so the
 * bypass bits are preserved). Returns 0, or -EINVAL for a bad index.
 */
int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
void gic_dist_save(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	/* 2 config bits per interrupt, 16 interrupts per register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic->saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	/* 8 target bits per interrupt, 4 interrupts per register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic->saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	/* 1 enable bit per interrupt, 32 interrupts per register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	/* 1 active bit per interrupt, 32 interrupts per register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
void gic_dist_restore(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	/* Keep the distributor off while reprogramming it. */
	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic->saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities weren't saved: reprogram the default for everything. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI),
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic->saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	/* Clear-all first so only the saved enable/active bits remain set. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_active[i],
			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
/*
 * Save this CPU's banked PPI/SGI enable, active and config state into
 * the per-cpu save areas allocated by gic_pm_init().
 */
void gic_cpu_save(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
/*
 * Restore this CPU's banked PPI/SGI state saved by gic_cpu_save(),
 * reprogram default priorities, and re-enable the CPU interface.
 */
void gic_cpu_restore(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	/* Clear-all before set so only the saved bits end up enabled. */
	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities weren't saved: reprogram the default. */
	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(REPEAT_BYTE_U32(GICD_INT_DEF_PRI),
					dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}
/*
 * CPU PM notifier: save/restore the banked CPU-interface state around a
 * CPU power-down, and the distributor state around a cluster power-down,
 * for every registered GIC.
 */
static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
{
	int i;

	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(&gic_data[i]);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(&gic_data[i]);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
/*
 * Allocate the per-cpu save areas for the banked PPI/SGI state and, for
 * the primary GIC, register the CPU PM notifier.
 * Returns 0 on success or -ENOMEM (partial allocations are freed).
 */
static int gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_enable))
		return -ENOMEM;

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_active))
		goto free_ppi_enable;

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_conf))
		goto free_ppi_active;

	/* Only the primary GIC talks to the CPUs, so only it needs PM. */
	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);

	return 0;

free_ppi_active:
	free_percpu(gic->saved_ppi_active);
free_ppi_enable:
	free_percpu(gic->saved_ppi_enable);

	return -ENOMEM;
}
#else
static int gic_pm_init(struct gic_chip_data *gic)
{
	return 0;
}
#endif
#ifdef CONFIG_SMP
/*
 * Emulate a byte write with a locked 32-bit read-modify-write, for
 * distributors that don't support byte accesses to GICD_ITARGETSRn
 * (selected by the needs_rmw_access static key).
 */
static void rmw_writeb(u8 bval, void __iomem *addr)
{
	static DEFINE_RAW_SPINLOCK(rmw_lock);
	unsigned long offset = (unsigned long)addr & 3UL;
	unsigned long shift = offset * 8;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&rmw_lock, flags);

	/* Align down to the containing word and splice in the new byte. */
	addr -= offset;
	val = readl_relaxed(addr);
	val &= ~GENMASK(shift + 7, shift);
	val |= bval << shift;
	writel_relaxed(val, addr);

	raw_spin_unlock_irqrestore(&rmw_lock, flags);
}
/*
 * Route an interrupt to a single CPU by writing that CPU's interface
 * mask into its GICD_ITARGETSRn target byte (one byte per interrupt).
 * Only meaningful on the primary GIC.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + irqd_to_hwirq(d);
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
	unsigned int cpu;

	if (unlikely(gic != &gic_data[0]))
		return -EINVAL;

	/* !force: restrict the choice to CPUs that are actually online. */
	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	if (static_branch_unlikely(&needs_rmw_access))
		rmw_writeb(gic_cpu_map[cpu], reg);
	else
		writeb_relaxed(gic_cpu_map[cpu], reg);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
/* Send SGI d->hwirq to every CPU in @mask via GICD_SGIR. */
static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;
	unsigned long flags, map = 0;

	if (unlikely(nr_cpu_ids == 1)) {
		/* Only one CPU? let's do a self-IPI... */
		writel_relaxed(2 << 24 | d->hwirq,
			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
		return;
	}

	gic_lock_irqsave(flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	gic_unlock_irqrestore(flags);
}
/* CPU hotplug callback: bring up this CPU's interface on the primary GIC. */
static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init(&gic_data[0]);
	return 0;
}

/*
 * Register the CPU bring-up callback and allocate the 8 SGIs used as
 * IPIs (hwirqs 0-7) from the primary GIC's domain.
 */
static __init void gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data[0].domain->fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gic:starting",
				  gic_starting_cpu, NULL);

	base_sgi = irq_domain_alloc_irqs(gic_data[0].domain, 8, NUMA_NO_NODE, &sgi_fwspec);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}
#else
#define gic_smp_init()		do { } while(0)
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#endif
/*
 * Default irqchip, used when split EOI/Deactivate is not in effect
 * (see the chip selection in gic_irq_domain_map()).  No .name here;
 * .irq_print_chip supplies the displayed name.
 */
static const struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_print_chip		= gic_irq_print_chip,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
/*
 * Irqchip used on the root GIC when supports_deactivate_key is enabled:
 * masking and EOI use the eoimode1 variants, and vcpu affinity is
 * supported for VM use.
 */
static const struct irq_chip gic_chip_mode1 = {
	.name			= "GICv2",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;	/* target list in GICD_SGIR is a bitmask */
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
  751. /*
  752. * gic_get_cpu_id - get the CPU interface ID for the specified CPU
  753. *
  754. * @cpu: the logical CPU number to get the GIC ID for.
  755. *
  756. * Return the CPU interface ID for the given logical CPU number,
  757. * or -1 if the CPU number is too large or the interface ID is
  758. * unknown (more than one bit set).
  759. */
  760. int gic_get_cpu_id(unsigned int cpu)
  761. {
  762. unsigned int cpu_bit;
  763. if (cpu >= NR_GIC_CPU_IF)
  764. return -1;
  765. cpu_bit = gic_cpu_map[cpu];
  766. if (cpu_bit & (cpu_bit - 1))
  767. return -1;
  768. return __ffs(cpu_bit);
  769. }
/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	/* One target byte per interrupt: replicate across all four lanes. */
	cur_target_mask = 0x01010101 << cur_cpu_id;
	/* Rotating each byte by this amount maps cur_cpu_id onto new_cpu_id. */
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	gic_lock();

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
		}
	}

	gic_unlock();

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			/* One pending byte per SGI; re-inject towards the new target. */
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}
/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;	/* set by gic_init_physaddr(), 0 if unknown */

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}
  849. static void __init gic_init_physaddr(struct device_node *node)
  850. {
  851. struct resource res;
  852. if (of_address_to_resource(node, 0, &res) == 0) {
  853. gic_dist_physaddr = res.start;
  854. pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
  855. }
  856. }
#else
/* !CONFIG_BL_SWITCHER: no physical address bookkeeping needed. */
#define gic_init_physaddr(node)  do { } while (0)
#endif
/*
 * Bind virq @irq to hardware interrupt @hw: SGIs/PPIs (0-31) become
 * per-CPU interrupts, everything else gets the fasteoi flow.  The root
 * GIC uses the eoimode1 chip when split EOI/Deactivate is enabled.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
	const struct irq_chip *chip;

	chip = (static_branch_likely(&supports_deactivate_key) &&
		gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;

	switch (hw) {
	case 0 ... 31:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;
	default:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}
  885. static int gic_irq_domain_translate(struct irq_domain *d,
  886. struct irq_fwspec *fwspec,
  887. unsigned long *hwirq,
  888. unsigned int *type)
  889. {
  890. if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
  891. *hwirq = fwspec->param[0];
  892. *type = IRQ_TYPE_EDGE_RISING;
  893. return 0;
  894. }
  895. if (is_of_node(fwspec->fwnode)) {
  896. if (fwspec->param_count < 3)
  897. return -EINVAL;
  898. switch (fwspec->param[0]) {
  899. case 0: /* SPI */
  900. *hwirq = fwspec->param[1] + 32;
  901. break;
  902. case 1: /* PPI */
  903. *hwirq = fwspec->param[1] + 16;
  904. break;
  905. default:
  906. return -EINVAL;
  907. }
  908. *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
  909. /* Make it clear that broken DTs are... broken */
  910. WARN(*type == IRQ_TYPE_NONE,
  911. "HW irq %ld has invalid type\n", *hwirq);
  912. return 0;
  913. }
  914. if (is_fwnode_irqchip(fwspec->fwnode)) {
  915. if(fwspec->param_count != 2)
  916. return -EINVAL;
  917. if (fwspec->param[0] < 16) {
  918. pr_err(FW_BUG "Illegal GSI%d translation request\n",
  919. fwspec->param[0]);
  920. return -EINVAL;
  921. }
  922. *hwirq = fwspec->param[0];
  923. *type = fwspec->param[1];
  924. WARN(*type == IRQ_TYPE_NONE,
  925. "HW irq %ld has invalid type\n", *hwirq);
  926. return 0;
  927. }
  928. return -EINVAL;
  929. }
  930. static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  931. unsigned int nr_irqs, void *arg)
  932. {
  933. int i, ret;
  934. irq_hw_number_t hwirq;
  935. unsigned int type = IRQ_TYPE_NONE;
  936. struct irq_fwspec *fwspec = arg;
  937. ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
  938. if (ret)
  939. return ret;
  940. for (i = 0; i < nr_irqs; i++) {
  941. ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
  942. if (ret)
  943. return ret;
  944. }
  945. return 0;
  946. }
/* Domain ops: hierarchical alloc/free with the translator above. */
static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};
/*
 * Common bring-up for one GIC instance: resolve (possibly per-CPU)
 * register bases, size the interrupt space, create the irq domain and
 * initialize the distributor, CPU interface and PM hooks.
 * Returns 0 on success or a negative errno; on failure any per-CPU
 * base arrays are freed again.
 */
static int gic_init_bases(struct gic_chip_data *gic,
			  struct fwnode_handle *handle)
{
	int gic_irqs, ret;

	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		/* Frankein-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			ret = -ENOMEM;
			goto error;
		}

		/* Each CPU gets its own alias at raw_base + offset * core_id. */
		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = gic->percpu_offset * core_id;

			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
				gic->raw_dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
				gic->raw_cpu_base + offset;
		}

		enable_frankengic();
	} else {
		/* Normal, sane GIC... */
		WARN(gic->percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     gic->percpu_offset);
		gic->dist_base.common_base = gic->raw_dist_base;
		gic->cpu_base.common_base = gic->raw_cpu_base;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;

	gic->gic_irqs = gic_irqs;

	gic->domain = irq_domain_create_linear(handle, gic_irqs,
					       &gic_irq_domain_hierarchy_ops,
					       gic);
	if (WARN_ON(!gic->domain)) {
		ret = -ENODEV;
		goto error;
	}

	gic_dist_init(gic);
	ret = gic_cpu_init(gic);
	if (ret)
		goto error;

	ret = gic_pm_init(gic);
	if (ret)
		goto error;

	return 0;

error:
	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		free_percpu(gic->dist_base.percpu_base);
		free_percpu(gic->cpu_base.percpu_base);
	}

	return ret;
}
/*
 * One-time bring-up wrapper: performs root-GIC-only setup (CPU map,
 * top-level IRQ handler, SMP/IPI init) around gic_init_bases().
 * Returns -EINVAL if @gic is NULL or already initialized.
 */
static int __init __gic_init_bases(struct gic_chip_data *gic,
				   struct fwnode_handle *handle)
{
	int i, ret;

	if (WARN_ON(!gic || gic->domain))
		return -EINVAL;

	if (gic == &gic_data[0]) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;

		set_handle_irq(gic_handle_irq);
		if (static_branch_likely(&supports_deactivate_key))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	ret = gic_init_bases(gic, handle);
	if (gic == &gic_data[0])
		gic_smp_init();

	return ret;
}
/* Undo gic_of_setup()/ioremap: unmap whichever register windows exist. */
static void gic_teardown(struct gic_chip_data *gic)
{
	if (WARN_ON(!gic))
		return;

	if (gic->raw_dist_base)
		iounmap(gic->raw_dist_base);
	if (gic->raw_cpu_base)
		iounmap(gic->raw_cpu_base);
}
static int gic_cnt __initdata;		/* number of GIC instances probed so far */
static bool gicv2_force_probe;		/* allow undersized CPU interface ranges */

/* Parse the "irqchip.gicv2_force_probe" kernel command-line parameter. */
static int __init gicv2_force_probe_cfg(char *buf)
{
	return kstrtobool(buf, &gicv2_force_probe);
}
early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
  1054. static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
  1055. {
  1056. struct resource cpuif_res;
  1057. of_address_to_resource(node, 1, &cpuif_res);
  1058. if (!is_hyp_mode_available())
  1059. return false;
  1060. if (resource_size(&cpuif_res) < SZ_8K) {
  1061. void __iomem *alt;
  1062. /*
  1063. * Check for a stupid firmware that only exposes the
  1064. * first page of a GICv2.
  1065. */
  1066. if (!gic_check_gicv2(*base))
  1067. return false;
  1068. if (!gicv2_force_probe) {
  1069. pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
  1070. return false;
  1071. }
  1072. alt = ioremap(cpuif_res.start, SZ_8K);
  1073. if (!alt)
  1074. return false;
  1075. if (!gic_check_gicv2(alt + SZ_4K)) {
  1076. /*
  1077. * The first page was that of a GICv2, and
  1078. * the second was *something*. Let's trust it
  1079. * to be a GICv2, and update the mapping.
  1080. */
  1081. pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
  1082. &cpuif_res.start);
  1083. iounmap(*base);
  1084. *base = alt;
  1085. return true;
  1086. }
  1087. /*
  1088. * We detected *two* initial GICv2 pages in a
  1089. * row. Could be a GICv2 aliased over two 64kB
  1090. * pages. Update the resource, map the iospace, and
  1091. * pray.
  1092. */
  1093. iounmap(alt);
  1094. alt = ioremap(cpuif_res.start, SZ_128K);
  1095. if (!alt)
  1096. return false;
  1097. pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
  1098. &cpuif_res.start);
  1099. cpuif_res.end = cpuif_res.start + SZ_128K -1;
  1100. iounmap(*base);
  1101. *base = alt;
  1102. }
  1103. if (resource_size(&cpuif_res) == SZ_128K) {
  1104. /*
  1105. * Verify that we have the first 4kB of a GICv2
  1106. * aliased over the first 64kB by checking the
  1107. * GICC_IIDR register on both ends.
  1108. */
  1109. if (!gic_check_gicv2(*base) ||
  1110. !gic_check_gicv2(*base + 0xf000))
  1111. return false;
  1112. /*
  1113. * Move the base up by 60kB, so that we have a 8kB
  1114. * contiguous region, which allows us to use GICC_DIR
  1115. * at its normal offset. Please pass me that bucket.
  1116. */
  1117. *base += 0xf000;
  1118. cpuif_res.start += 0xf000;
  1119. pr_warn("GIC: Adjusting CPU interface base to %pa\n",
  1120. &cpuif_res.start);
  1121. }
  1122. return true;
  1123. }
  1124. static bool gic_enable_rmw_access(void *data)
  1125. {
  1126. /*
  1127. * The EMEV2 class of machines has a broken interconnect, and
  1128. * locks up on accesses that are less than 32bit. So far, only
  1129. * the affinity setting requires it.
  1130. */
  1131. if (of_machine_is_compatible("renesas,emev2")) {
  1132. static_branch_enable(&needs_rmw_access);
  1133. return true;
  1134. }
  1135. return false;
  1136. }
/* DT-matched quirk table applied by gic_enable_of_quirks(). */
static const struct gic_quirk gic_quirks[] = {
	{
		.desc		= "broken byte access",
		.compatible	= "arm,pl390",
		.init		= gic_enable_rmw_access,
	},
	{ },
};
/*
 * Map the distributor (reg 0) and CPU interface (reg 1) windows from
 * the DT, read the optional "cpu-offset" property and apply quirks.
 * Returns 0 on success; on mapping failure, unmaps anything mapped so
 * far and returns -ENOMEM.
 */
static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
	if (!gic || !node)
		return -EINVAL;

	gic->raw_dist_base = of_iomap(node, 0);
	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
		goto error;

	gic->raw_cpu_base = of_iomap(node, 1);
	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
		goto error;

	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
		gic->percpu_offset = 0;

	gic_enable_of_quirks(node, gic_quirks, gic);

	return 0;

error:
	gic_teardown(gic);

	return -ENOMEM;
}
/*
 * Probe a secondary GIC as a child device: devm-allocate the chip data,
 * map and initialize it, and chain it behind parent interrupt @irq.
 * The allocated structure is returned through @gic.
 */
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	int ret;

	if (!dev || !dev->of_node || !gic || !irq)
		return -EINVAL;

	*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
	if (!*gic)
		return -ENOMEM;

	ret = gic_of_setup(*gic, dev->of_node);
	if (ret)
		return ret;

	ret = gic_init_bases(*gic, &dev->of_node->fwnode);
	if (ret) {
		gic_teardown(*gic);
		return ret;
	}

	irq_domain_set_pm_device((*gic)->domain, dev);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);

	return 0;
}
/*
 * Collect the virtualization resources (maintenance IRQ, GICH at reg 2,
 * GICV at reg 3) from the DT and hand them to KVM's VGIC, but only when
 * split EOI/Deactivate mode is in use.  Silently bails out if any piece
 * is missing.
 */
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v2_kvm_info.maint_irq)
		return;

	ret = of_address_to_resource(node, 2, vctrl_res);
	if (ret)
		return;

	ret = of_address_to_resource(node, 3, vcpu_res);
	if (ret)
		return;

	if (static_branch_likely(&supports_deactivate_key))
		vgic_set_kvm_info(&gic_v2_kvm_info);
}
/*
 * Top-level DT probe: set up the next gic_data[] slot, decide on
 * EOImode, initialize the instance, and (for the root GIC only) record
 * the physical address and KVM info.  Secondary GICs are chained behind
 * their parent interrupt.
 */
int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	struct gic_chip_data *gic;
	int irq, ret;

	if (WARN_ON(!node))
		return -ENODEV;

	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
		return -EINVAL;

	gic = &gic_data[gic_cnt];

	ret = gic_of_setup(gic, node);
	if (ret)
		return ret;

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
		static_branch_disable(&supports_deactivate_key);

	ret = __gic_init_bases(gic, &node->fwnode);
	if (ret) {
		gic_teardown(gic);
		return ret;
	}

	if (!gic_cnt) {
		gic_init_physaddr(node);
		gic_of_setup_kvm_info(node);
	}

	if (parent) {
		/*
		 * NOTE(review): a failed mapping yields irq == 0, which is
		 * passed on unchecked - confirm gic_cascade_irq() rejects it.
		 */
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
/* DT compatible strings handled by this driver. */
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
#ifdef CONFIG_ACPI
/* Addresses and maintenance-IRQ info gathered from the MADT GICC entries. */
static struct
{
	phys_addr_t cpu_phys_base;	/* GICC base, identical on every CPU */
	u32 maint_irq;			/* VGIC maintenance interrupt (GSI) */
	int maint_irq_mode;		/* ACPI_EDGE_/LEVEL_SENSITIVE */
	phys_addr_t vctrl_base;		/* GICH base */
	phys_addr_t vcpu_base;		/* GICV base */
} acpi_data __initdata;
/*
 * MADT GICC walker: validate each entry and capture the (necessarily
 * shared) CPU interface base plus virtualization info into acpi_data.
 * Returns -EINVAL on a malformed entry or inconsistent base addresses.
 */
static int __init
gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 register in ACPI spec.
	 * All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
		return -EINVAL;

	acpi_data.cpu_phys_base = gic_cpu_base;
	acpi_data.maint_irq = processor->vgic_interrupt;
	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
	acpi_data.vctrl_base = processor->gich_base_address;
	acpi_data.vcpu_base = processor->gicv_base_address;

	cpu_base_assigned = 1;
	return 0;
}
/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	return 0;
}

/* A redistributor in the MADT means GICv3+, not the v2 handled here. */
static bool __init acpi_gic_redist_is_present(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				     acpi_dummy_func, 0) > 0;
}
/*
 * Probe-entry match: accept the distributor when its declared version
 * matches, or when the version is NONE and no redistributor exists
 * (i.e. it cannot be a GICv3).
 */
static bool __init gic_validate_dist(struct acpi_subtable_header *header,
				     struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	dist = (struct acpi_madt_generic_distributor *)header;

	return (dist->version == ape->driver_data &&
		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
		 !acpi_gic_redist_is_present()));
}
/* Fixed mapping sizes for a GICv2 described by ACPI (no reg properties). */
#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
/*
 * Populate gic_v2_kvm_info from the MADT-derived acpi_data and register
 * it with KVM's VGIC.  Bails out quietly when the GICH/GICV bases are
 * absent or the maintenance GSI cannot be registered.
 */
static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	if (!acpi_data.vctrl_base)
		return;

	vctrl_res->flags = IORESOURCE_MEM;
	vctrl_res->start = acpi_data.vctrl_base;
	vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;

	if (!acpi_data.vcpu_base)
		return;

	vcpu_res->flags = IORESOURCE_MEM;
	vcpu_res->start = acpi_data.vcpu_base;
	vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v2_kvm_info.maint_irq = irq;

	vgic_set_kvm_info(&gic_v2_kvm_info);
}
static struct fwnode_handle *gsi_domain_handle;	/* set in gic_v2_acpi_init() */

/* All GSIs map to the single GICv2 domain. */
static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}
/*
 * ACPI probe: map GICC/GICD from the MADT, decide on EOImode, bring up
 * the single GIC instance and wire up the GSI model, V2M and KVM info.
 * Each error path unwinds whatever was mapped/allocated so far.
 */
static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct gic_chip_data *gic = &gic_data[0];
	int count, ret;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!gic->raw_cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	gic->raw_dist_base = ioremap(dist->base_address,
				     ACPI_GICV2_DIST_MEM_SIZE);
	if (!gic->raw_dist_base) {
		pr_err("Unable to map GICD registers\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	ret = __gic_init_bases(gic, gsi_domain_handle);
	if (ret) {
		pr_err("Failed to initialise GIC\n");
		irq_domain_free_fwnode(gsi_domain_handle);
		gic_teardown(gic);
		return ret;
	}

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;
}
/* Match explicit GICv2 distributors, and version-NONE ones with no redistributor. */
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
#endif