omap-wakeupgen.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637
  1. /*
  2. * OMAP WakeupGen Source file
  3. *
  4. * OMAP WakeupGen is the interrupt controller extension used along
  5. * with ARM GIC to wake the CPU out from low power states on
  6. * external interrupts. It is responsible for generating wakeup
  7. * event from the incoming interrupts and enable bits. It is
  8. * implemented in MPU always ON power domain. During normal operation,
  9. * WakeupGen delivers external interrupts directly to the GIC.
  10. *
  11. * Copyright (C) 2011 Texas Instruments, Inc.
  12. * Santosh Shilimkar <santosh.shilimkar@ti.com>
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License version 2 as
  16. * published by the Free Software Foundation.
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/init.h>
  20. #include <linux/io.h>
  21. #include <linux/irq.h>
  22. #include <linux/irqchip.h>
  23. #include <linux/irqdomain.h>
  24. #include <linux/of_address.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/cpu.h>
  27. #include <linux/notifier.h>
  28. #include <linux/cpu_pm.h>
  29. #include "omap-wakeupgen.h"
  30. #include "omap-secure.h"
  31. #include "soc.h"
  32. #include "omap4-sar-layout.h"
  33. #include "common.h"
  34. #include "pm.h"
/* AM43xx has the largest WakeupGen: 7 banks x 32 bits = 224 interrupts */
#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
/* Static arrays below are sized for the largest supported SoC (AM43xx) */
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
/* Default geometry used unless overridden in wakeupgen_init(): 5 x 32 = 160 */
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
/* Writing all-zeroes to an enable bank masks every IRQ in it; all-ones unmasks */
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
/* Each CPU's register window is CPU_ENA_OFFSET bytes after the previous one */
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
/* OMAP4 geometry: 4 banks x 32 bits = 128 interrupts */
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128
/* SPI numbers of the sys_nirq pins wired directly to the WakeupGen */
#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
#define SYS_NIRQ2_EXT_SYS_IRQ_2	119
/* MMIO base of the WakeupGen block; mapped once in wakeupgen_init() */
static void __iomem *wakeupgen_base;
/* SAR RAM base used by the CPU_PM save/restore paths; set in wakeupgen_init() */
static void __iomem *sar_base;
/* Serializes read-modify-write cycles on the WakeupGen enable registers */
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
/* Per-IRQ record of which CPU's bank holds the enable bit (boot CPU at init) */
static unsigned int irq_target_cpu[MAX_IRQS];
/* Actual geometry for the running SoC; overridden in wakeupgen_init() */
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
/* Non-zero when secure APIs are usable; set for OMAP4, see omap_secure_apis_support() */
static unsigned int omap_secure_apis;

#ifdef CONFIG_CPU_PM
/* CPU0's enable banks, saved across low power states on AM43xx */
static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
#endif

/* Per-SoC hooks invoked around cluster low power transitions */
struct omap_wakeupgen_ops {
	void (*save_context)(void);
	void (*restore_context)(void);
};

static struct omap_wakeupgen_ops *wakeupgen_ops;
  65. /*
  66. * Static helper functions.
  67. */
  68. static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
  69. {
  70. return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
  71. (cpu * CPU_ENA_OFFSET) + (idx * 4));
  72. }
  73. static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
  74. {
  75. writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
  76. (cpu * CPU_ENA_OFFSET) + (idx * 4));
  77. }
  78. static inline void sar_writel(u32 val, u32 offset, u8 idx)
  79. {
  80. writel_relaxed(val, sar_base + offset + (idx * 4));
  81. }
  82. static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
  83. {
  84. /*
  85. * Each WakeupGen register controls 32 interrupt.
  86. * i.e. 1 bit per SPI IRQ
  87. */
  88. *reg_index = irq >> 5;
  89. *bit_posn = irq %= 32;
  90. return 0;
  91. }
  92. static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
  93. {
  94. u32 val, bit_number;
  95. u8 i;
  96. if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
  97. return;
  98. val = wakeupgen_readl(i, cpu);
  99. val &= ~BIT(bit_number);
  100. wakeupgen_writel(val, i, cpu);
  101. }
  102. static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
  103. {
  104. u32 val, bit_number;
  105. u8 i;
  106. if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
  107. return;
  108. val = wakeupgen_readl(i, cpu);
  109. val |= BIT(bit_number);
  110. wakeupgen_writel(val, i, cpu);
  111. }
  112. /*
  113. * Architecture specific Mask extension
  114. */
  115. static void wakeupgen_mask(struct irq_data *d)
  116. {
  117. unsigned long flags;
  118. raw_spin_lock_irqsave(&wakeupgen_lock, flags);
  119. _wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
  120. raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
  121. irq_chip_mask_parent(d);
  122. }
  123. /*
  124. * Architecture specific Unmask extension
  125. */
  126. static void wakeupgen_unmask(struct irq_data *d)
  127. {
  128. unsigned long flags;
  129. raw_spin_lock_irqsave(&wakeupgen_lock, flags);
  130. _wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
  131. raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
  132. irq_chip_unmask_parent(d);
  133. }
  134. /*
  135. * The sys_nirq pins bypass peripheral modules and are wired directly
  136. * to MPUSS wakeupgen. They get automatically inverted for GIC.
  137. */
  138. static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
  139. {
  140. bool inverted = false;
  141. switch (type) {
  142. case IRQ_TYPE_LEVEL_LOW:
  143. type &= ~IRQ_TYPE_LEVEL_MASK;
  144. type |= IRQ_TYPE_LEVEL_HIGH;
  145. inverted = true;
  146. break;
  147. case IRQ_TYPE_EDGE_FALLING:
  148. type &= ~IRQ_TYPE_EDGE_BOTH;
  149. type |= IRQ_TYPE_EDGE_RISING;
  150. inverted = true;
  151. break;
  152. default:
  153. break;
  154. }
  155. if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
  156. d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
  157. pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
  158. d->hwirq);
  159. return irq_chip_set_type_parent(d, type);
  160. }
  161. #ifdef CONFIG_HOTPLUG_CPU
  162. static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
  163. static void _wakeupgen_save_masks(unsigned int cpu)
  164. {
  165. u8 i;
  166. for (i = 0; i < irq_banks; i++)
  167. per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
  168. }
  169. static void _wakeupgen_restore_masks(unsigned int cpu)
  170. {
  171. u8 i;
  172. for (i = 0; i < irq_banks; i++)
  173. wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
  174. }
  175. static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
  176. {
  177. u8 i;
  178. for (i = 0; i < irq_banks; i++)
  179. wakeupgen_writel(reg, i, cpu);
  180. }
/*
 * Mask or unmask all interrupts on given CPU.
 *	1 = Mask all interrupts on the 'cpu' (after saving current masks)
 *	0 = Unmask all interrupts on the 'cpu' (restoring the saved masks)
 * (The original comment had these two values swapped; the code below and
 * the hotplug callers — online passes 0, dead passes 1 — agree on this.)
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		/* Remember what was enabled, then mask everything */
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		/* Unmask everything, then reinstate the remembered masks */
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
  201. #endif
  202. #ifdef CONFIG_CPU_PM
/*
 * OMAP4: save the WakeupGen state into SAR RAM so it can be restored
 * (by ROM code, per irq_save_context()'s description) after a low
 * power state. No-op on ES1.0, which is unsupported.
 */
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not suppose
		 * to be enabled from HLOS. So overwrite the SAR location
		 * so that the secure interrupt remains disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status to flag that a valid save exists */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}
  239. static inline void omap5_irq_save_context(void)
  240. {
  241. u32 i, val;
  242. for (i = 0; i < irq_banks; i++) {
  243. /* Save the CPUx interrupt mask for IRQ 0 to 159 */
  244. val = wakeupgen_readl(i, 0);
  245. sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
  246. val = wakeupgen_readl(i, 1);
  247. sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
  248. sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
  249. sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
  250. }
  251. /* Save AuxBoot* registers */
  252. val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
  253. writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
  254. val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
  255. writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);
  256. /* Set the Backup Bit Mask status */
  257. val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
  258. val |= SAR_BACKUP_STATUS_WAKEUPGEN;
  259. writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
  260. }
  261. static inline void am43xx_irq_save_context(void)
  262. {
  263. u32 i;
  264. for (i = 0; i < irq_banks; i++) {
  265. wakeupgen_context[i] = wakeupgen_readl(i, 0);
  266. wakeupgen_writel(0, i, CPU0_ID);
  267. }
  268. }
  269. /*
  270. * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
  271. * ROM code. WakeupGen IP is integrated along with GIC to manage the
  272. * interrupt wakeups from CPU low power states. It manages
  273. * masking/unmasking of Shared peripheral interrupts(SPI). So the
  274. * interrupt enable/disable control should be in sync and consistent
  275. * at WakeupGen and GIC so that interrupts are not lost.
  276. */
  277. static void irq_save_context(void)
  278. {
  279. /* DRA7 has no SAR to save */
  280. if (soc_is_dra7xx())
  281. return;
  282. if (wakeupgen_ops && wakeupgen_ops->save_context)
  283. wakeupgen_ops->save_context();
  284. }
  285. /*
  286. * Clear WakeupGen SAR backup status.
  287. */
  288. static void irq_sar_clear(void)
  289. {
  290. u32 val;
  291. u32 offset = SAR_BACKUP_STATUS_OFFSET;
  292. /* DRA7 has no SAR to save */
  293. if (soc_is_dra7xx())
  294. return;
  295. if (soc_is_omap54xx())
  296. offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
  297. val = readl_relaxed(sar_base + offset);
  298. val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
  299. writel_relaxed(val, sar_base + offset);
  300. }
  301. static void am43xx_irq_restore_context(void)
  302. {
  303. u32 i;
  304. for (i = 0; i < irq_banks; i++)
  305. wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
  306. }
  307. static void irq_restore_context(void)
  308. {
  309. if (wakeupgen_ops && wakeupgen_ops->restore_context)
  310. wakeupgen_ops->restore_context();
  311. }
  312. /*
  313. * Save GIC and Wakeupgen interrupt context using secure API
  314. * for HS/EMU devices.
  315. */
  316. static void irq_save_secure_context(void)
  317. {
  318. u32 ret;
  319. ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
  320. FLAG_START_CRITICAL,
  321. 0, 0, 0, 0, 0);
  322. if (ret != API_HAL_RET_VALUE_OK)
  323. pr_err("GIC and Wakeupgen context save failed\n");
  324. }
/* Define ops for context save and restore for each SoC */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
	.save_context = omap4_irq_save_context,
	/* restore of the masks happens elsewhere; only clear the status bit */
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
	.save_context = omap5_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
	.save_context = am43xx_irq_save_context,
	.restore_context = am43xx_irq_restore_context,
};
#else
/* Without CONFIG_CPU_PM there is no context to save or restore */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
#endif
  343. #ifdef CONFIG_HOTPLUG_CPU
/* Hotplug callback: CPU coming online — unmask all and restore its masks */
static int omap_wakeupgen_cpu_online(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 0);
	return 0;
}
/* Hotplug callback: CPU going down — save its masks and mask everything */
static int omap_wakeupgen_cpu_dead(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 1);
	return 0;
}
/* Register hotplug callbacks so WakeupGen masks track CPUs going up/down */
static void __init irq_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
				  omap_wakeupgen_cpu_online, NULL);
	cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
				  "arm/omap-wake:dead", NULL,
				  omap_wakeupgen_cpu_dead);
}
#else
/* No hotplug support configured: nothing to register */
static void __init irq_hotplug_init(void)
{}
#endif
  366. #ifdef CONFIG_CPU_PM
  367. static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
  368. {
  369. switch (cmd) {
  370. case CPU_CLUSTER_PM_ENTER:
  371. if (omap_type() == OMAP2_DEVICE_TYPE_GP)
  372. irq_save_context();
  373. else
  374. irq_save_secure_context();
  375. break;
  376. case CPU_CLUSTER_PM_EXIT:
  377. if (omap_type() == OMAP2_DEVICE_TYPE_GP)
  378. irq_restore_context();
  379. break;
  380. }
  381. return NOTIFY_OK;
  382. }
static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

/* Hook the CPU PM notifier, unless the OMAP4 OSWR erratum disables it */
static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
/* No CPU_PM support configured: nothing to register */
static void __init irq_pm_init(void)
{}
#endif
/* Return the ioremapped WakeupGen base (NULL before wakeupgen_init()) */
void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}
/* Non-zero when secure APIs are available (set for OMAP4 during init) */
int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}
/*
 * Hierarchical irqchip: mask/unmask/set_type manipulate the WakeupGen
 * enable bits and then forward to the parent (GIC) chip operations.
 */
static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= wakeupgen_irq_set_type,
	/* WakeupGen masks survive suspend; no per-IRQ set_wake needed */
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};
  416. static int wakeupgen_domain_translate(struct irq_domain *d,
  417. struct irq_fwspec *fwspec,
  418. unsigned long *hwirq,
  419. unsigned int *type)
  420. {
  421. if (is_of_node(fwspec->fwnode)) {
  422. if (fwspec->param_count != 3)
  423. return -EINVAL;
  424. /* No PPI should point to this domain */
  425. if (fwspec->param[0] != 0)
  426. return -EINVAL;
  427. *hwirq = fwspec->param[1];
  428. *type = fwspec->param[2];
  429. return 0;
  430. }
  431. return -EINVAL;
  432. }
  433. static int wakeupgen_domain_alloc(struct irq_domain *domain,
  434. unsigned int virq,
  435. unsigned int nr_irqs, void *data)
  436. {
  437. struct irq_fwspec *fwspec = data;
  438. struct irq_fwspec parent_fwspec;
  439. irq_hw_number_t hwirq;
  440. int i;
  441. if (fwspec->param_count != 3)
  442. return -EINVAL; /* Not GIC compliant */
  443. if (fwspec->param[0] != 0)
  444. return -EINVAL; /* No PPI should point to this domain */
  445. hwirq = fwspec->param[1];
  446. if (hwirq >= MAX_IRQS)
  447. return -EINVAL; /* Can't deal with this */
  448. for (i = 0; i < nr_irqs; i++)
  449. irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
  450. &wakeupgen_chip, NULL);
  451. parent_fwspec = *fwspec;
  452. parent_fwspec.fwnode = domain->parent->fwnode;
  453. return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
  454. &parent_fwspec);
  455. }
/* Domain ops: translation and alloc are local, free falls through to core */
static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};
/*
 * Initialise the wakeupgen module: map the registers, detect the SoC
 * geometry and PM ops, create the irq domain stacked on the GIC, clear
 * all enable banks, and register hotplug/PM hooks.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	/* WakeupGen is stacked on the GIC, so a parent node is mandatory */
	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	/* Pick register geometry and context save/restore ops per SoC */
	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
		wakeupgen_ops = &omap4_wakeupgen_ops;
	} else if (soc_is_omap54xx()) {
		wakeupgen_ops = &omap5_wakeupgen_ops;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
		wakeupgen_ops = &am43xx_wakeupgen_ops;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at wakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		/* AM43xx has no CPU1 register window to clear */
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enables OMAP5 ES2 PM Mode using ES2_PM_MODE in AMBA_IF_MODE
	 * 0x0: ES1 behavior, CPU cores would enter and exit OFF mode together.
	 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 * independently.
	 * This needs to be set one time thanks to always ON domain.
	 *
	 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
	 * ES2.0, and the same is applicable for DRA7.
	 */
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	/* SAR base is needed by the CPU_PM save/restore paths above */
	sar_base = omap4_get_sar_ram_base();

	return 0;
}
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);