  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Freescale SCFG MSI(-X) support
  4. *
  5. * Copyright (C) 2016 Freescale Semiconductor.
  6. *
  7. * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/msi.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/iommu.h>
  14. #include <linux/irq.h>
  15. #include <linux/irqchip/chained_irq.h>
  16. #include <linux/irqdomain.h>
  17. #include <linux/of_irq.h>
  18. #include <linux/of_pci.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/property.h>
  21. #include <linux/spinlock.h>
#define MSI_IRQS_PER_MSIR		32	/* status bits carried by one 32-bit MSIR */
#define MSI_MSIR_OFFSET			4	/* MSIR base offset within the register block */
#define MSI_LS1043V1_1_IRQS_PER_MSIR	8	/* LS1043A v1.1 packs only 8 irqs per MSIR */
#define MSI_LS1043V1_1_MSIR_OFFSET	0x10	/* LS1043A v1.1 MSIR base offset */
/* Per-SoC description of how hwirqs map onto the MSIR registers */
struct ls_scfg_msi_cfg {
	u32 ibs_shift; /* Shift of interrupt bit select */
	u32 msir_irqs; /* The irq number per MSIR */
	u32 msir_base; /* The base address of MSIR */
};
/* State for one MSIR status register and its chained upstream interrupt */
struct ls_scfg_msir {
	struct ls_scfg_msi *msi_data;	/* back-pointer to the owning controller */
	unsigned int index;		/* position within msi_data->msir[] */
	unsigned int gic_irq;		/* Linux irq number of the upstream line */
	unsigned int bit_start;		/* first bit of this MSIR's slice of the word */
	unsigned int bit_end;		/* last bit of this MSIR's slice of the word */
	unsigned int srs; /* Shared interrupt register select */
	void __iomem *reg;		/* mapped MMIO address of this MSIR */
};
/* Driver-instance state: register mapping, domains and the hwirq pool */
struct ls_scfg_msi {
	spinlock_t lock;		/* protects the 'used' bitmap */
	struct platform_device *pdev;
	struct irq_domain *parent;	/* inner domain managing raw hwirqs */
	struct irq_domain *msi_domain;	/* PCI MSI(-X) domain stacked on parent */
	void __iomem *regs;		/* mapped SCFG MSI register block */
	phys_addr_t msiir_addr;		/* physical doorbell address devices write */
	struct ls_scfg_msi_cfg *cfg;	/* per-SoC layout (match data) */
	u32 msir_num;			/* number of MSIR entries actually used */
	struct ls_scfg_msir *msir;	/* one entry per MSIR / upstream irq */
	u32 irqs_num;			/* total hwirqs in the pool */
	unsigned long *used;		/* allocation bitmap; set bit = unavailable */
};
/* irq_chip exposed to the PCI MSI layer; masking goes through standard PCI ops */
static struct irq_chip ls_scfg_msi_irq_chip = {
	.name = "MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
/* MSI domain info: default domain/chip ops, MSI-X capable */
static struct msi_domain_info ls_scfg_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
		  MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &ls_scfg_msi_irq_chip,
};
  64. static int msi_affinity_flag = 1;
  65. static int __init early_parse_ls_scfg_msi(char *p)
  66. {
  67. if (p && strncmp(p, "no-affinity", 11) == 0)
  68. msi_affinity_flag = 0;
  69. else
  70. msi_affinity_flag = 1;
  71. return 0;
  72. }
  73. early_param("lsmsi", early_parse_ls_scfg_msi);
  74. static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
  75. {
  76. struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
  77. msg->address_hi = upper_32_bits(msi_data->msiir_addr);
  78. msg->address_lo = lower_32_bits(msi_data->msiir_addr);
  79. msg->data = data->hwirq;
  80. if (msi_affinity_flag) {
  81. const struct cpumask *mask;
  82. mask = irq_data_get_effective_affinity_mask(data);
  83. msg->data |= cpumask_first(mask);
  84. }
  85. iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
  86. }
  87. static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
  88. const struct cpumask *mask, bool force)
  89. {
  90. struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
  91. u32 cpu;
  92. if (!msi_affinity_flag)
  93. return -EINVAL;
  94. if (!force)
  95. cpu = cpumask_any_and(mask, cpu_online_mask);
  96. else
  97. cpu = cpumask_first(mask);
  98. if (cpu >= msi_data->msir_num)
  99. return -EINVAL;
  100. if (msi_data->msir[cpu].gic_irq <= 0) {
  101. pr_warn("cannot bind the irq to cpu%d\n", cpu);
  102. return -EINVAL;
  103. }
  104. irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
  105. return IRQ_SET_MASK_OK;
  106. }
/* irq_chip of the inner (parent) domain: composes messages, sets affinity */
static struct irq_chip ls_scfg_msi_parent_chip = {
	.name = "SCFG",
	.irq_compose_msi_msg = ls_scfg_msi_compose_msg,
	.irq_set_affinity = ls_scfg_msi_set_affinity,
};
  112. static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
  113. unsigned int virq,
  114. unsigned int nr_irqs,
  115. void *args)
  116. {
  117. msi_alloc_info_t *info = args;
  118. struct ls_scfg_msi *msi_data = domain->host_data;
  119. int pos, err = 0;
  120. WARN_ON(nr_irqs != 1);
  121. spin_lock(&msi_data->lock);
  122. pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
  123. if (pos < msi_data->irqs_num)
  124. __set_bit(pos, msi_data->used);
  125. else
  126. err = -ENOSPC;
  127. spin_unlock(&msi_data->lock);
  128. if (err)
  129. return err;
  130. err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
  131. if (err)
  132. return err;
  133. irq_domain_set_info(domain, virq, pos,
  134. &ls_scfg_msi_parent_chip, msi_data,
  135. handle_simple_irq, NULL, NULL);
  136. return 0;
  137. }
  138. static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
  139. unsigned int virq, unsigned int nr_irqs)
  140. {
  141. struct irq_data *d = irq_domain_get_irq_data(domain, virq);
  142. struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
  143. int pos;
  144. pos = d->hwirq;
  145. if (pos < 0 || pos >= msi_data->irqs_num) {
  146. pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
  147. return;
  148. }
  149. spin_lock(&msi_data->lock);
  150. __clear_bit(pos, msi_data->used);
  151. spin_unlock(&msi_data->lock);
  152. }
/* Inner-domain ops: bitmap-backed alloc/free of hwirqs */
static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
	.alloc = ls_scfg_msi_domain_irq_alloc,
	.free = ls_scfg_msi_domain_irq_free,
};
/*
 * Chained handler for one MSIR: read the (big-endian) status word and
 * demux every pending bit into a hwirq on the parent domain.
 */
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
	struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
	struct ls_scfg_msi *msi_data = msir->msi_data;
	unsigned long val;
	int pos, size, hwirq;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	val = ioread32be(msir->reg);

	/* Scan only this MSIR's slice [bit_start, bit_end] of the word */
	pos = msir->bit_start;
	size = msir->bit_end + 1;

	for_each_set_bit_from(pos, &val, size) {
		/*
		 * Bit order in the register is the reverse of the interrupt
		 * bit select: hwirq = (ibs << ibs_shift) | srs.
		 */
		hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
			msir->srs;
		generic_handle_domain_irq(msi_data->parent, hwirq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}
  174. static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
  175. {
  176. /* Initialize MSI domain parent */
  177. msi_data->parent = irq_domain_add_linear(NULL,
  178. msi_data->irqs_num,
  179. &ls_scfg_msi_domain_ops,
  180. msi_data);
  181. if (!msi_data->parent) {
  182. dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
  183. return -ENOMEM;
  184. }
  185. msi_data->msi_domain = pci_msi_create_irq_domain(
  186. of_node_to_fwnode(msi_data->pdev->dev.of_node),
  187. &ls_scfg_msi_domain_info,
  188. msi_data->parent);
  189. if (!msi_data->msi_domain) {
  190. dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
  191. irq_domain_remove(msi_data->parent);
  192. return -ENOMEM;
  193. }
  194. return 0;
  195. }
/*
 * Bind MSIR @index to its upstream interrupt, compute which slice of
 * the 32-bit MSIR word carries its status bits, then release this
 * MSIR's hwirqs back into the allocatable pool.
 */
static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
	struct ls_scfg_msir *msir;
	int virq, i, hwirq;

	virq = platform_get_irq(msi_data->pdev, index);
	if (virq <= 0)
		return -ENODEV;

	msir = &msi_data->msir[index];
	msir->index = index;
	msir->msi_data = msi_data;
	msir->gic_irq = virq;
	/* MSIRs are consecutive 32-bit registers starting at msir_base */
	msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;

	if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
		/* LS1043A v1.1: four 8-bit slices packed from the top of the word down */
		msir->bit_start = 32 - ((msir->index + 1) *
				  MSI_LS1043V1_1_IRQS_PER_MSIR);
		msir->bit_end = msir->bit_start +
				MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
	} else {
		/* Other SoCs: each MSIR owns the whole word */
		msir->bit_start = 0;
		msir->bit_end = msi_data->cfg->msir_irqs - 1;
	}

	irq_set_chained_handler_and_data(msir->gic_irq,
					 ls_scfg_msi_irq_handler,
					 msir);

	if (msi_affinity_flag) {
		/* Associate MSIR interrupt to the cpu */
		irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
		msir->srs = 0; /* This value is determined by the CPU */
	} else
		msir->srs = index;

	/* Release the hwirqs corresponding to this MSIR */
	if (!msi_affinity_flag || msir->index == 0) {
		for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
			hwirq = i << msi_data->cfg->ibs_shift | msir->index;
			bitmap_clear(msi_data->used, hwirq, 1);
		}
	}

	return 0;
}
  235. static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
  236. {
  237. struct ls_scfg_msi *msi_data = msir->msi_data;
  238. int i, hwirq;
  239. if (msir->gic_irq > 0)
  240. irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
  241. for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
  242. hwirq = i << msi_data->cfg->ibs_shift | msir->index;
  243. bitmap_set(msi_data->used, hwirq, 1);
  244. }
  245. return 0;
  246. }
/* LS1021A/LS1012A/LS1043A: 32 irqs per MSIR, ibs in bits [7:3] of the payload */
static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
	.ibs_shift = 3,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

/* LS1046A: 32 irqs per MSIR, ibs shifted by 2 (4 shared-register selects) */
static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

/* LS1043A v1.1: only 8 irqs per MSIR, different register base */
static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
	.msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};
static const struct of_device_id ls_scfg_msi_id[] = {
	/* The following two misspelled compatibles are obsolete */
	{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
	{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},

	{ .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
	{ .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
/*
 * Probe: map the SCFG MSI register block, size the hwirq pool from the
 * per-SoC config, wire up each MSIR's upstream interrupt and create the
 * irq domains.
 */
static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data;
	struct resource *res;
	int i, ret;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	msi_data->cfg = (struct ls_scfg_msi_cfg *)device_get_match_data(&pdev->dev);
	if (!msi_data->cfg)
		return -ENODEV;

	msi_data->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(msi_data->regs)) {
		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi_data->regs);
	}
	/* Devices trigger an MSI by writing to the physical start of the block */
	msi_data->msiir_addr = res->start;

	msi_data->pdev = pdev;
	spin_lock_init(&msi_data->lock);

	/* Pool size: 32 payload values per shared-register select */
	msi_data->irqs_num = MSI_IRQS_PER_MSIR *
			     (1 << msi_data->cfg->ibs_shift);
	msi_data->used = devm_bitmap_zalloc(&pdev->dev, msi_data->irqs_num, GFP_KERNEL);
	if (!msi_data->used)
		return -ENOMEM;
	/*
	 * Reserve all the hwirqs
	 * The available hwirqs will be released in ls1_msi_setup_hwirq()
	 */
	bitmap_set(msi_data->used, 0, msi_data->irqs_num);

	msi_data->msir_num = of_irq_count(pdev->dev.of_node);

	if (msi_affinity_flag) {
		u32 cpu_num;

		cpu_num = num_possible_cpus();
		/* Per-CPU steering needs at least one MSIR per possible CPU */
		if (msi_data->msir_num >= cpu_num)
			msi_data->msir_num = cpu_num;
		else
			msi_affinity_flag = 0;
	}

	msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
				      sizeof(*msi_data->msir),
				      GFP_KERNEL);
	if (!msi_data->msir)
		return -ENOMEM;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_setup_hwirq(msi_data, i);

	ret = ls_scfg_msi_domains_init(msi_data);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, msi_data);

	return 0;
}
  325. static void ls_scfg_msi_remove(struct platform_device *pdev)
  326. {
  327. struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
  328. int i;
  329. for (i = 0; i < msi_data->msir_num; i++)
  330. ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
  331. irq_domain_remove(msi_data->msi_domain);
  332. irq_domain_remove(msi_data->parent);
  333. platform_set_drvdata(pdev, NULL);
  334. }
static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name = "ls-scfg-msi",
		.of_match_table = ls_scfg_msi_id,
	},
	.probe = ls_scfg_msi_probe,
	.remove_new = ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);

MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");