irq-mtk-cirq.c 8.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016 MediaTek Inc.
  4. * Author: Youlin.Pei <youlin.pei@mediatek.com>
  5. */
  6. #include <linux/interrupt.h>
  7. #include <linux/io.h>
  8. #include <linux/irq.h>
  9. #include <linux/irqchip.h>
  10. #include <linux/irqdomain.h>
  11. #include <linux/of.h>
  12. #include <linux/of_irq.h>
  13. #include <linux/of_address.h>
  14. #include <linux/slab.h>
  15. #include <linux/syscore_ops.h>
/*
 * Logical register indices for the CIRQ block. The physical offsets
 * differ between SoC generations and are resolved through the per-SoC
 * tables (mtk_cirq_regoffs_v1/v2) selected at probe time.
 */
enum mtk_cirq_regoffs_index {
	CIRQ_STA,	/* latched interrupt status */
	CIRQ_ACK,	/* write-1-to-clear status acknowledge */
	CIRQ_MASK_SET,	/* set mask bits */
	CIRQ_MASK_CLR,	/* clear mask bits */
	CIRQ_SENS_SET,	/* sensitivity: set = level (see set_type) */
	CIRQ_SENS_CLR,	/* sensitivity: clear = edge */
	CIRQ_POL_SET,	/* polarity: set = rising/high */
	CIRQ_POL_CLR,	/* polarity: clear = falling/low */
	CIRQ_CONTROL	/* global control (CIRQ_EN/EDGE/FLUSH bits) */
};
/* Register layout for first-generation CIRQ (mt2701/mt8135/mt8173). */
static const u32 mtk_cirq_regoffs_v1[] = {
	[CIRQ_STA]	= 0x0,
	[CIRQ_ACK]	= 0x40,
	[CIRQ_MASK_SET]	= 0xc0,
	[CIRQ_MASK_CLR]	= 0x100,
	[CIRQ_SENS_SET]	= 0x180,
	[CIRQ_SENS_CLR]	= 0x1c0,
	[CIRQ_POL_SET]	= 0x240,
	[CIRQ_POL_CLR]	= 0x280,
	[CIRQ_CONTROL]	= 0x300,
};
/* Register layout for second-generation CIRQ (mt8192). */
static const u32 mtk_cirq_regoffs_v2[] = {
	[CIRQ_STA]	= 0x0,
	[CIRQ_ACK]	= 0x80,
	[CIRQ_MASK_SET]	= 0x180,
	[CIRQ_MASK_CLR]	= 0x200,
	[CIRQ_SENS_SET]	= 0x300,
	[CIRQ_SENS_CLR]	= 0x380,
	[CIRQ_POL_SET]	= 0x480,
	[CIRQ_POL_CLR]	= 0x500,
	[CIRQ_CONTROL]	= 0x600,
};
/* Bits in the CIRQ_CONTROL register */
#define CIRQ_EN 0x1	/* enable the CIRQ block */
#define CIRQ_EDGE 0x2	/* edge-only recording mode (used across suspend) */
#define CIRQ_FLUSH 0x4	/* replay recorded interrupts to the parent */
/* Per-instance driver state, allocated once at init. */
struct mtk_cirq_chip_data {
	void __iomem *base;		/* mapped CIRQ register block */
	unsigned int ext_irq_start;	/* first parent irq number covered */
	unsigned int ext_irq_end;	/* last parent irq number covered (inclusive) */
	const u32 *offsets;		/* per-SoC register offset table */
	struct irq_domain *domain;	/* our hierarchical irq domain */
};

/* Single global instance; the driver supports one CIRQ per system. */
static struct mtk_cirq_chip_data *cirq_data;
  60. static void __iomem *mtk_cirq_reg(struct mtk_cirq_chip_data *chip_data,
  61. enum mtk_cirq_regoffs_index idx)
  62. {
  63. return chip_data->base + chip_data->offsets[idx];
  64. }
  65. static void __iomem *mtk_cirq_irq_reg(struct mtk_cirq_chip_data *chip_data,
  66. enum mtk_cirq_regoffs_index idx,
  67. unsigned int cirq_num)
  68. {
  69. return mtk_cirq_reg(chip_data, idx) + (cirq_num / 32) * 4;
  70. }
  71. static void mtk_cirq_write_mask(struct irq_data *data, enum mtk_cirq_regoffs_index idx)
  72. {
  73. struct mtk_cirq_chip_data *chip_data = data->chip_data;
  74. unsigned int cirq_num = data->hwirq;
  75. u32 mask = 1 << (cirq_num % 32);
  76. writel_relaxed(mask, mtk_cirq_irq_reg(chip_data, idx, cirq_num));
  77. }
  78. static void mtk_cirq_mask(struct irq_data *data)
  79. {
  80. mtk_cirq_write_mask(data, CIRQ_MASK_SET);
  81. irq_chip_mask_parent(data);
  82. }
  83. static void mtk_cirq_unmask(struct irq_data *data)
  84. {
  85. mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
  86. irq_chip_unmask_parent(data);
  87. }
  88. static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
  89. {
  90. int ret;
  91. switch (type & IRQ_TYPE_SENSE_MASK) {
  92. case IRQ_TYPE_EDGE_FALLING:
  93. mtk_cirq_write_mask(data, CIRQ_POL_CLR);
  94. mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
  95. break;
  96. case IRQ_TYPE_EDGE_RISING:
  97. mtk_cirq_write_mask(data, CIRQ_POL_SET);
  98. mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
  99. break;
  100. case IRQ_TYPE_LEVEL_LOW:
  101. mtk_cirq_write_mask(data, CIRQ_POL_CLR);
  102. mtk_cirq_write_mask(data, CIRQ_SENS_SET);
  103. break;
  104. case IRQ_TYPE_LEVEL_HIGH:
  105. mtk_cirq_write_mask(data, CIRQ_POL_SET);
  106. mtk_cirq_write_mask(data, CIRQ_SENS_SET);
  107. break;
  108. default:
  109. break;
  110. }
  111. data = data->parent_data;
  112. ret = data->chip->irq_set_type(data, type);
  113. return ret;
  114. }
/*
 * irq_chip for interrupts routed through the CIRQ; everything not
 * handled locally is delegated to the parent (hierarchy helpers).
 */
static struct irq_chip mtk_cirq_chip = {
	.name			= "MT_CIRQ",
	.irq_mask		= mtk_cirq_mask,
	.irq_unmask		= mtk_cirq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= mtk_cirq_set_type,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};
  126. static int mtk_cirq_domain_translate(struct irq_domain *d,
  127. struct irq_fwspec *fwspec,
  128. unsigned long *hwirq,
  129. unsigned int *type)
  130. {
  131. if (is_of_node(fwspec->fwnode)) {
  132. if (fwspec->param_count != 3)
  133. return -EINVAL;
  134. /* No PPI should point to this domain */
  135. if (fwspec->param[0] != 0)
  136. return -EINVAL;
  137. /* cirq support irq number check */
  138. if (fwspec->param[1] < cirq_data->ext_irq_start ||
  139. fwspec->param[1] > cirq_data->ext_irq_end)
  140. return -EINVAL;
  141. *hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
  142. *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
  143. return 0;
  144. }
  145. return -EINVAL;
  146. }
  147. static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  148. unsigned int nr_irqs, void *arg)
  149. {
  150. int ret;
  151. irq_hw_number_t hwirq;
  152. unsigned int type;
  153. struct irq_fwspec *fwspec = arg;
  154. struct irq_fwspec parent_fwspec = *fwspec;
  155. ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
  156. if (ret)
  157. return ret;
  158. if (WARN_ON(nr_irqs != 1))
  159. return -EINVAL;
  160. irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
  161. &mtk_cirq_chip,
  162. domain->host_data);
  163. parent_fwspec.fwnode = domain->parent->fwnode;
  164. return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
  165. &parent_fwspec);
  166. }
/* Domain callbacks; freeing uses the generic common helper. */
static const struct irq_domain_ops cirq_domain_ops = {
	.translate	= mtk_cirq_domain_translate,
	.alloc		= mtk_cirq_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};
  172. #ifdef CONFIG_PM_SLEEP
/*
 * Syscore suspend hook: clear stale recorded status, then arm the CIRQ
 * in edge-recording mode so wake interrupts are captured while the
 * parent controller is down.
 */
static int mtk_cirq_suspend(void)
{
	void __iomem *reg;
	u32 value, mask;
	unsigned int irq, hwirq_num;
	bool pending, masked;
	int i, pendret, maskret;

	/*
	 * When external interrupts happened, CIRQ will record the status
	 * even CIRQ is not enabled. When execute flush command, CIRQ will
	 * resend the signals according to the status. So if don't clear the
	 * status, CIRQ will resend the wrong signals.
	 *
	 * arch_suspend_disable_irqs() will be called before CIRQ suspend
	 * callback. If clear all the status simply, the external interrupts
	 * which happened between arch_suspend_disable_irqs and CIRQ suspend
	 * callback will be lost. Using following steps to avoid this issue;
	 *
	 * - Iterate over all the CIRQ supported interrupts;
	 * - For each interrupt, inspect its pending and masked status at GIC
	 *   level;
	 * - If pending and unmasked, it happened between
	 *   arch_suspend_disable_irqs and CIRQ suspend callback, don't ACK
	 *   it. Otherwise, ACK it.
	 */
	hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	for (i = 0; i < hwirq_num; i++) {
		irq = irq_find_mapping(cirq_data->domain, i);
		if (irq) {
			pendret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_PENDING,
							&pending);
			maskret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_MASKED,
							&masked);
			/* Pending and unmasked: preserve it, do not ACK. */
			if (pendret == 0 && maskret == 0 &&
			    (pending && !masked))
				continue;
		}
		/* Stale or unmapped: ACK so flush won't resend it. */
		reg = mtk_cirq_irq_reg(cirq_data, CIRQ_ACK, i);
		mask = 1 << (i % 32);
		writel_relaxed(mask, reg);
	}

	/* set edge_only mode, record edge-triggered interrupts */
	/* enable cirq */
	reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
	value = readl_relaxed(reg);
	value |= (CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, reg);

	return 0;
}
  224. static void mtk_cirq_resume(void)
  225. {
  226. void __iomem *reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
  227. u32 value;
  228. /* flush recorded interrupts, will send signals to parent controller */
  229. value = readl_relaxed(reg);
  230. writel_relaxed(value | CIRQ_FLUSH, reg);
  231. /* disable cirq */
  232. value = readl_relaxed(reg);
  233. value &= ~(CIRQ_EDGE | CIRQ_EN);
  234. writel_relaxed(value, reg);
  235. }
static struct syscore_ops mtk_cirq_syscore_ops = {
	.suspend	= mtk_cirq_suspend,
	.resume		= mtk_cirq_resume,
};

/* Hook the CIRQ save/arm/replay sequence into the syscore PM path. */
static void mtk_cirq_syscore_init(void)
{
	register_syscore_ops(&mtk_cirq_syscore_ops);
}
#else	/* !CONFIG_PM_SLEEP */
static inline void mtk_cirq_syscore_init(void) {}
#endif
/* Compatible strings; .data selects the per-SoC register offset table. */
static const struct of_device_id mtk_cirq_of_match[] = {
	{ .compatible = "mediatek,mt2701-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8135-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8173-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8192-cirq", .data = &mtk_cirq_regoffs_v2 },
	{ /* sentinel */ }
};
/*
 * Probe the CIRQ from devicetree: map registers, read the covered
 * interrupt range, pick the per-SoC offset table, and create a
 * hierarchical irq domain stacked on the parent (GIC) domain.
 * Returns 0 on success or a negative errno.
 */
static int __init mtk_cirq_of_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *domain, *domain_parent;
	const struct of_device_id *match;
	unsigned int irq_num;
	int ret;

	domain_parent = irq_find_host(parent);
	if (!domain_parent) {
		pr_err("mtk_cirq: interrupt-parent not found\n");
		return -EINVAL;
	}

	cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
	if (!cirq_data)
		return -ENOMEM;

	cirq_data->base = of_iomap(node, 0);
	if (!cirq_data->base) {
		pr_err("mtk_cirq: unable to map cirq register\n");
		ret = -ENXIO;
		goto out_free;
	}

	/* "mediatek,ext-irq-range" = <start end>, end inclusive. */
	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
					 &cirq_data->ext_irq_start);
	if (ret)
		goto out_unmap;

	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
					 &cirq_data->ext_irq_end);
	if (ret)
		goto out_unmap;

	/* Select the generation-specific register offsets. */
	match = of_match_node(mtk_cirq_of_match, node);
	if (!match) {
		ret = -ENODEV;
		goto out_unmap;
	}
	cirq_data->offsets = match->data;

	irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	domain = irq_domain_add_hierarchy(domain_parent, 0,
					  irq_num, node,
					  &cirq_domain_ops, cirq_data);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	cirq_data->domain = domain;

	mtk_cirq_syscore_init();

	return 0;

out_unmap:
	iounmap(cirq_data->base);
out_free:
	kfree(cirq_data);
	return ret;
}

IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);