pcie-iproc-msi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Broadcom Corporation
 */

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"

#define IPROC_MSI_INTR_EN_SHIFT		11
#define IPROC_MSI_INTR_EN		BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT	1
#define IPROC_MSI_INT_N_EVENT		BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT		0
#define IPROC_MSI_EQ_EN			BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK		0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS			6

/* Number of entries in each event queue */
#define EQ_LEN				64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE		SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE		SZ_4K
enum iproc_msi_reg {
	IPROC_MSI_EQ_PAGE = 0,
	IPROC_MSI_EQ_PAGE_UPPER,
	IPROC_MSI_PAGE,
	IPROC_MSI_PAGE_UPPER,
	IPROC_MSI_CTRL,
	IPROC_MSI_EQ_HEAD,
	IPROC_MSI_EQ_TAIL,
	IPROC_MSI_INTS_EN,
	IPROC_MSI_REG_SIZE,
};

struct iproc_msi;
/**
 * struct iproc_msi_grp - iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: event queue number
 */
struct iproc_msi_grp {
	struct iproc_msi *msi;
	int gic_irq;
	unsigned int eq;
};
/**
 * struct iproc_msi - iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 *	set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 *	queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 *	posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
	struct iproc_pcie *pcie;
	const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
	struct iproc_msi_grp *grps;
	int nr_irqs;
	int nr_cpus;
	bool has_inten_reg;
	unsigned long *bitmap;
	struct mutex bitmap_lock;
	unsigned int nr_msi_vecs;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	unsigned int nr_eq_region;
	unsigned int nr_msi_region;
	void *eq_cpu;
	dma_addr_t eq_dma;
	phys_addr_t msi_addr;
};
static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
	{ 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
	{ 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
	{ 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
	{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};

static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
				     enum iproc_msi_reg reg,
				     unsigned int eq)
{
	struct iproc_pcie *pcie = msi->pcie;

	return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
				       enum iproc_msi_reg reg,
				       int eq, u32 val)
{
	struct iproc_pcie *pcie = msi->pcie;

	writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
	return (hwirq % msi->nr_irqs);
}
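
/*
 * With multiple MSI address regions (one per group, as configured for PAXC),
 * each group's MSI posted write lands in its own 4K window; with a single
 * shared region (PAXB), groups are separated by a 32-bit word offset within
 * that region.  The event queue layout in iproc_msi_eq_offset() follows the
 * same pattern.
 */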
static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
						 unsigned long hwirq)
{
	if (msi->nr_msi_region > 1)
		return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
	else
		return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
	if (msi->nr_eq_region > 1)
		return eq * EQ_MEM_REGION_SIZE;
	else
		return eq * EQ_LEN * sizeof(u32);
}

static struct irq_chip iproc_msi_irq_chip = {
	.name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
	.chip = &iproc_msi_irq_chip,
};
/*
 * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
 * dedicated event queue. Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs. The total
 * number of CPU cores also varies. To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs. An MSI vector is
 * moved from one GIC interrupt to another in order to steer it to the
 * target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */
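/*
 * For example, with M = 6 GIC interrupts and N = 2 CPUs there are
 * 6 * 64 = 384 raw vectors but only 384 / 2 = 192 usable MSI vectors:
 * each logical MSI owns one raw vector per CPU, and changing its affinity
 * switches to the sibling raw vector (and thus the GIC interrupt) that is
 * bound to the target CPU.
 */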
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
	return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
						     unsigned long hwirq)
{
	return (hwirq - hwirq_to_cpu(msi, hwirq));
}
static int iproc_msi_irq_set_affinity(struct irq_data *data,
				      const struct cpumask *mask, bool force)
{
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	int target_cpu = cpumask_first(mask);
	int curr_cpu;
	int ret;

	curr_cpu = hwirq_to_cpu(msi, data->hwirq);
	if (curr_cpu == target_cpu) {
		ret = IRQ_SET_MASK_OK_DONE;
	} else {
		/* steer MSI to the target CPU */
		data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) +
			      target_cpu;
		ret = IRQ_SET_MASK_OK;
	}

	irq_data_update_effective_affinity(data, cpumask_of(target_cpu));

	return ret;
}
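
/*
 * The MSI message data is the hwirq shifted left by 5 bits; a device doing
 * multi-MSI adds its vector index in the low 5 bits. decode_msi_hwirq()
 * reverses this by summing (data >> 5) + (data & 0x1f) and then mapping the
 * result back to the canonical (CPU0) hwirq.
 */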
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	dma_addr_t addr;

	addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq << 5;
}

static struct irq_chip iproc_msi_bottom_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = iproc_msi_irq_set_affinity,
	.irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};
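
/*
 * Each allocation below reserves a block of nr_cpus consecutive hwirqs, one
 * per CPU, all describing the same logical MSI. iproc_msi_irq_set_affinity()
 * steers the MSI by moving to the sibling hwirq within the block, which maps
 * to a different group and therefore a different GIC interrupt/event queue.
 */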
static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
				      unsigned int virq, unsigned int nr_irqs,
				      void *args)
{
	struct iproc_msi *msi = domain->host_data;
	int hwirq, i;

	mutex_lock(&msi->bitmap_lock);

	/* Allocate 'nr_cpus' number of MSI vectors each time */
	hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
					   msi->nr_cpus, 0);
	if (hwirq < msi->nr_msi_vecs) {
		bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
	} else {
		mutex_unlock(&msi->bitmap_lock);
		return -ENOSPC;
	}

	mutex_unlock(&msi->bitmap_lock);

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &iproc_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}

	return 0;
}

static void iproc_msi_irq_domain_free(struct irq_domain *domain,
				      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	unsigned int hwirq;

	mutex_lock(&msi->bitmap_lock);

	hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
	bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);

	mutex_unlock(&msi->bitmap_lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc = iproc_msi_irq_domain_alloc,
	.free = iproc_msi_irq_domain_free,
};
static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
	u32 *msg, hwirq;
	unsigned int offs;

	offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
	msg = (u32 *)(msi->eq_cpu + offs);
	hwirq = readl(msg);
	hwirq = (hwirq >> 5) + (hwirq & 0x1f);

	/*
	 * Since we have multiple hwirqs mapped to a single MSI vector,
	 * now we need to derive the hwirq at CPU0. It can then be used to
	 * map back to the virq.
	 */
	return hwirq_to_canonical_hwirq(msi, hwirq);
}
static void iproc_msi_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct iproc_msi_grp *grp;
	struct iproc_msi *msi;
	u32 eq, head, tail, nr_events;
	unsigned long hwirq;
	int virq;

	chained_irq_enter(chip, desc);

	grp = irq_desc_get_handler_data(desc);
	msi = grp->msi;
	eq = grp->eq;

	/*
	 * iProc MSI event queue is tracked by head and tail pointers. Head
	 * pointer indicates the next entry (MSI data) to be consumed by SW in
	 * the queue and needs to be updated by SW. iProc MSI core uses the
	 * tail pointer as the next data insertion point.
	 *
	 * Entries between head and tail pointers contain valid MSI data. MSI
	 * data is guaranteed to be in the event queue memory before the tail
	 * pointer is updated by the iProc MSI core.
	 */
	head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
				  eq) & IPROC_MSI_EQ_MASK;
	do {
		tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
					  eq) & IPROC_MSI_EQ_MASK;

		/*
		 * Figure out total number of events (MSI data) to be
		 * processed.
		 */
		nr_events = (tail < head) ?
			(EQ_LEN - (head - tail)) : (tail - head);
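		/*
		 * For example, with head = 60 and tail = 4 the queue has
		 * wrapped, so nr_events = EQ_LEN - (60 - 4) = 8, covering
		 * entries 60..63 and 0..3.
		 */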
		if (!nr_events)
			break;

		/* process all outstanding events */
		while (nr_events--) {
			hwirq = decode_msi_hwirq(msi, eq, head);
			virq = irq_find_mapping(msi->inner_domain, hwirq);
			generic_handle_irq(virq);

			head++;
			head %= EQ_LEN;
		}

		/*
		 * Now all outstanding events have been processed. Update the
		 * head pointer.
		 */
		iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

		/*
		 * Now go read the tail pointer again to see if there are new
		 * outstanding events that came in during the above window.
		 */
	} while (true);

	chained_irq_exit(chip, desc);
}
static void iproc_msi_enable(struct iproc_msi *msi)
{
	int i, eq;
	u32 val;

	/* Program memory region for each event queue */
	for (i = 0; i < msi->nr_eq_region; i++) {
		dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
				    lower_32_bits(addr));
		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
				    upper_32_bits(addr));
	}

	/* Program address region for MSI posted writes */
	for (i = 0; i < msi->nr_msi_region; i++) {
		phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

		iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
				    lower_32_bits(addr));
		iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
				    upper_32_bits(addr));
	}

	for (eq = 0; eq < msi->nr_irqs; eq++) {
		/* Enable MSI event queue */
		val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
			IPROC_MSI_EQ_EN;
		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

		/*
		 * Some legacy platforms require the MSI interrupt enable
		 * register to be set explicitly.
		 */
		if (msi->has_inten_reg) {
			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
			val |= BIT(eq);
			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
		}
	}
}

static void iproc_msi_disable(struct iproc_msi *msi)
{
	u32 eq, val;

	for (eq = 0; eq < msi->nr_irqs; eq++) {
		if (msi->has_inten_reg) {
			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
			val &= ~BIT(eq);
			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
		}

		val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
		val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
			 IPROC_MSI_EQ_EN);
		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
	}
}
static int iproc_msi_alloc_domains(struct device_node *node,
				   struct iproc_msi *msi)
{
	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
						    &iproc_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);

	if (msi->inner_domain)
		irq_domain_remove(msi->inner_domain);
}
static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
	int i;

	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
						 NULL, NULL);
	}
}

static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
	int i, ret;
	cpumask_var_t mask;
	struct iproc_pcie *pcie = msi->pcie;

	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
						 iproc_msi_handler,
						 &msi->grps[i]);
		/* Dedicate GIC interrupt to each CPU core */
		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
			if (ret)
				dev_err(pcie->dev,
					"failed to set affinity for IRQ%d\n",
					msi->grps[i].gic_irq);
			free_cpumask_var(mask);
		} else {
			dev_err(pcie->dev, "failed to alloc CPU mask\n");
			ret = -EINVAL;
		}

		if (ret) {
			/* Free all configured/unconfigured IRQs */
			iproc_msi_irq_free(msi, cpu);
			return ret;
		}
	}

	return 0;
}
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
	struct iproc_msi *msi;
	int i, ret;
	unsigned int cpu;

	if (!of_device_is_compatible(node, "brcm,iproc-msi"))
		return -ENODEV;

	if (!of_find_property(node, "msi-controller", NULL))
		return -ENODEV;

	if (pcie->msi)
		return -EBUSY;

	msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	msi->pcie = pcie;
	pcie->msi = msi;
	msi->msi_addr = pcie->base_addr;
	mutex_init(&msi->bitmap_lock);
	msi->nr_cpus = num_possible_cpus();

	msi->nr_irqs = of_irq_count(node);
	if (!msi->nr_irqs) {
		dev_err(pcie->dev, "found no MSI GIC interrupt\n");
		return -ENODEV;
	}

	if (msi->nr_irqs > NR_HW_IRQS) {
		dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
			 msi->nr_irqs);
		msi->nr_irqs = NR_HW_IRQS;
	}

	if (msi->nr_irqs < msi->nr_cpus) {
		dev_err(pcie->dev,
			"not enough GIC interrupts for MSI affinity\n");
		return -EINVAL;
	}

	if (msi->nr_irqs % msi->nr_cpus != 0) {
		msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
		dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
			 msi->nr_irqs);
	}

	switch (pcie->type) {
	case IPROC_PCIE_PAXB_BCMA:
	case IPROC_PCIE_PAXB:
		msi->reg_offsets = iproc_msi_reg_paxb;
		msi->nr_eq_region = 1;
		msi->nr_msi_region = 1;
		break;
	case IPROC_PCIE_PAXC:
		msi->reg_offsets = iproc_msi_reg_paxc;
		msi->nr_eq_region = msi->nr_irqs;
		msi->nr_msi_region = msi->nr_irqs;
		break;
	default:
		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
		return -EINVAL;
	}

	if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
		msi->has_inten_reg = true;

	msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
	msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
				   sizeof(*msi->bitmap), GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
				 GFP_KERNEL);
	if (!msi->grps)
		return -ENOMEM;

	for (i = 0; i < msi->nr_irqs; i++) {
		unsigned int irq = irq_of_parse_and_map(node, i);

		if (!irq) {
			dev_err(pcie->dev, "unable to parse/map interrupt\n");
			ret = -ENODEV;
			goto free_irqs;
		}
		msi->grps[i].gic_irq = irq;
		msi->grps[i].msi = msi;
		msi->grps[i].eq = i;
	}
	/* Reserve memory for the event queues and make sure it is zeroed */
	msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
					  msi->nr_eq_region *
					  EQ_MEM_REGION_SIZE,
					  &msi->eq_dma, GFP_KERNEL);
	if (!msi->eq_cpu) {
		ret = -ENOMEM;
		goto free_irqs;
	}

	ret = iproc_msi_alloc_domains(node, msi);
	if (ret) {
		dev_err(pcie->dev, "failed to create MSI domains\n");
		goto free_eq_dma;
	}

	for_each_online_cpu(cpu) {
		ret = iproc_msi_irq_setup(msi, cpu);
		if (ret)
			goto free_msi_irq;
	}

	iproc_msi_enable(msi);

	return 0;

free_msi_irq:
	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);
	iproc_msi_free_domains(msi);

free_eq_dma:
	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

free_irqs:
	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}

	pcie->msi = NULL;

	return ret;
}
EXPORT_SYMBOL(iproc_msi_init);
void iproc_msi_exit(struct iproc_pcie *pcie)
{
	struct iproc_msi *msi = pcie->msi;
	unsigned int i, cpu;

	if (!msi)
		return;

	iproc_msi_disable(msi);

	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);

	iproc_msi_free_domains(msi);

	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}
}
EXPORT_SYMBOL(iproc_msi_exit);