qcom-irq-combiner.c

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Driver for interrupt combiners in the Top-level Control and Status
 * Registers (TCSR) hardware block in Qualcomm Technologies chips.
 * An interrupt combiner in this block combines a set of interrupts by
 * OR'ing the individual interrupt signals into a summary interrupt
 * signal routed to a parent interrupt controller, and provides read-
 * only, 32-bit registers to query the status of individual interrupts.
 * The status bit for IRQ n is bit (n % 32) within register (n / 32)
 * of the given combiner. Thus, each combiner can be described as a set
 * of register offsets and the number of IRQs managed.
 */

#define pr_fmt(fmt) "QCOM80B1:" fmt

#include <linux/acpi.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>

#define REG_SIZE 32

struct combiner_reg {
	void __iomem *addr;
	unsigned long enabled;
};

struct combiner {
	struct irq_domain *domain;
	int parent_irq;
	u32 nirqs;
	u32 nregs;
	struct combiner_reg regs[0];
};
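
/* Map a (register index, bit position) pair to a combiner hwirq number. */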
static inline int irq_nr(u32 reg, u32 bit)
{
	return reg * REG_SIZE + bit;
}

/*
 * Handler for the cascaded IRQ.
 */
static void combiner_handle_irq(struct irq_desc *desc)
{
	struct combiner *combiner = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 reg;

	chained_irq_enter(chip, desc);

	for (reg = 0; reg < combiner->nregs; reg++) {
		int virq;
		int hwirq;
		u32 bit;
		u32 status;

		bit = readl_relaxed(combiner->regs[reg].addr);
		status = bit & combiner->regs[reg].enabled;
		if (bit && !status)
			pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
					    smp_processor_id(), bit,
					    combiner->regs[reg].enabled,
					    combiner->regs[reg].addr);

		while (status) {
			bit = __ffs(status);
			status &= ~(1 << bit);
			hwirq = irq_nr(reg, bit);
			virq = irq_find_mapping(combiner->domain, hwirq);
			if (virq > 0)
				generic_handle_irq(virq);
		}
	}

	chained_irq_exit(chip, desc);
}
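
/*
 * The combiner's status registers are read-only, so masking is done in
 * software: mask/unmask simply update the per-register 'enabled' bitmap
 * that combiner_handle_irq() consults.
 */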
static void combiner_irq_chip_mask_irq(struct irq_data *data)
{
	struct combiner *combiner = irq_data_get_irq_chip_data(data);
	struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;

	clear_bit(data->hwirq % REG_SIZE, &reg->enabled);
}

static void combiner_irq_chip_unmask_irq(struct irq_data *data)
{
	struct combiner *combiner = irq_data_get_irq_chip_data(data);
	struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;

	set_bit(data->hwirq % REG_SIZE, &reg->enabled);
}

static struct irq_chip irq_chip = {
	.irq_mask = combiner_irq_chip_mask_irq,
	.irq_unmask = combiner_irq_chip_unmask_irq,
	.name = "qcom-irq-combiner"
};

static int combiner_irq_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_noprobe(irq);
	return 0;
}

static void combiner_irq_unmap(struct irq_domain *domain, unsigned int irq)
{
	irq_domain_reset_irq_data(irq_get_irq_data(irq));
}
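
/*
 * Decode an ACPI two-cell interrupt specifier: param[0] is the hwirq
 * within the combiner, param[1] the trigger type. Edge trigger types
 * are rejected; combined interrupts are handled as level interrupts
 * (see combiner_irq_map()).
 */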
static int combiner_irq_translate(struct irq_domain *d, struct irq_fwspec *fws,
				  unsigned long *hwirq, unsigned int *type)
{
	struct combiner *combiner = d->host_data;

	if (is_acpi_node(fws->fwnode)) {
		if (WARN_ON((fws->param_count != 2) ||
			    (fws->param[0] >= combiner->nirqs) ||
			    (fws->param[1] & IORESOURCE_IRQ_LOWEDGE) ||
			    (fws->param[1] & IORESOURCE_IRQ_HIGHEDGE)))
			return -EINVAL;

		*hwirq = fws->param[0];
		*type = fws->param[1];
		return 0;
	}

	return -EINVAL;
}

static const struct irq_domain_ops domain_ops = {
	.map = combiner_irq_map,
	.unmap = combiner_irq_unmap,
	.translate = combiner_irq_translate
};
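
/*
 * Count the Generic Register resources in the device's _CRS; each one
 * describes a single 32-bit status register of the combiner.
 */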
static acpi_status count_registers_cb(struct acpi_resource *ares, void *context)
{
	int *count = context;

	if (ares->type == ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
		++(*count);

	return AE_OK;
}

static int count_registers(struct platform_device *pdev)
{
	acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
	acpi_status status;
	int count = 0;

	if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
		return -EINVAL;

	status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
				     count_registers_cb, &count);
	if (ACPI_FAILURE(status))
		return -EINVAL;

	return count;
}

struct get_registers_context {
	struct device *dev;
	struct combiner *combiner;
	int err;
};
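
/*
 * Validate and ioremap one Generic Register resource, record its mapping
 * in the combiner, and grow nirqs by the register's bit width.
 */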
static acpi_status get_registers_cb(struct acpi_resource *ares, void *context)
{
	struct get_registers_context *ctx = context;
	struct acpi_resource_generic_register *reg;
	phys_addr_t paddr;
	void __iomem *vaddr;

	if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
		return AE_OK;

	reg = &ares->data.generic_reg;
	paddr = reg->address;
	if ((reg->space_id != ACPI_SPACE_MEM) ||
	    (reg->bit_offset != 0) ||
	    (reg->bit_width > REG_SIZE)) {
		dev_err(ctx->dev, "Bad register resource @%pa\n", &paddr);
		ctx->err = -EINVAL;
		return AE_ERROR;
	}

	vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE);
	if (!vaddr) {
		dev_err(ctx->dev, "Can't map register @%pa\n", &paddr);
		ctx->err = -ENOMEM;
		return AE_ERROR;
	}

	ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr;
	ctx->combiner->nirqs += reg->bit_width;
	ctx->combiner->nregs++;
	return AE_OK;
}

static int get_registers(struct platform_device *pdev, struct combiner *comb)
{
	acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
	acpi_status status;
	struct get_registers_context ctx;

	if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
		return -EINVAL;

	ctx.dev = &pdev->dev;
	ctx.combiner = comb;
	ctx.err = 0;

	status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
				     get_registers_cb, &ctx);
	if (ACPI_FAILURE(status))
		return ctx.err;

	return 0;
}
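
/*
 * Count the status registers described in _CRS, allocate the combiner
 * with one combiner_reg entry per register, map the registers, create a
 * linear IRQ domain sized to nirqs, and install the chained handler on
 * the parent interrupt.
 */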
static int __init combiner_probe(struct platform_device *pdev)
{
	struct combiner *combiner;
	size_t alloc_sz;
	int nregs;
	int err;

	nregs = count_registers(pdev);
	if (nregs <= 0) {
		dev_err(&pdev->dev, "Error reading register resources\n");
		return -EINVAL;
	}

	alloc_sz = sizeof(*combiner) + sizeof(struct combiner_reg) * nregs;
	combiner = devm_kzalloc(&pdev->dev, alloc_sz, GFP_KERNEL);
	if (!combiner)
		return -ENOMEM;

	err = get_registers(pdev, combiner);
	if (err < 0)
		return err;

	combiner->parent_irq = platform_get_irq(pdev, 0);
	if (combiner->parent_irq <= 0) {
		dev_err(&pdev->dev, "Error getting IRQ resource\n");
		return -EPROBE_DEFER;
	}

	combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs,
						    &domain_ops, combiner);
	if (!combiner->domain)
		/* Errors printed by irq_domain_create_linear */
		return -ENODEV;

	irq_set_chained_handler_and_data(combiner->parent_irq,
					 combiner_handle_irq, combiner);

	dev_info(&pdev->dev, "Initialized with [p=%d,n=%d,r=%p]\n",
		 combiner->parent_irq, combiner->nirqs, combiner->regs[0].addr);
	return 0;
}

static const struct acpi_device_id qcom_irq_combiner_ids[] = {
	{ "QCOM80B1", },
	{ }
};

static struct platform_driver qcom_irq_combiner_probe = {
	.driver = {
		.name = "qcom-irq-combiner",
		.acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids),
	},
	.probe = combiner_probe,
};
builtin_platform_driver(qcom_irq_combiner_probe);