pcie-altera.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright Altera Corporation (C) 2013-2015. All rights reserved
 *
 * Author: Ley Foon Tan <lftan@altera.com>
 * Description: Altera PCIe host controller driver
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../pci.h"

#define RP_TX_REG0                      0x2000
#define RP_TX_REG1                      0x2004
#define RP_TX_CNTRL                     0x2008
#define RP_TX_EOP                       0x2
#define RP_TX_SOP                       0x1
#define RP_RXCPL_STATUS                 0x2010
#define RP_RXCPL_EOP                    0x2
#define RP_RXCPL_SOP                    0x1
#define RP_RXCPL_REG0                   0x2014
#define RP_RXCPL_REG1                   0x2018
#define P2A_INT_STATUS                  0x3060
#define P2A_INT_STS_ALL                 0xf
#define P2A_INT_ENABLE                  0x3070
#define P2A_INT_ENA_ALL                 0xf
#define RP_LTSSM                        0x3c64
#define RP_LTSSM_MASK                   0x1f
#define LTSSM_L0                        0xf

#define PCIE_CAP_OFFSET                 0x80
/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0              0x04    /* Configuration Read Type 0 */
#define TLP_FMTTYPE_CFGWR0              0x44    /* Configuration Write Type 0 */
#define TLP_FMTTYPE_CFGRD1              0x05    /* Configuration Read Type 1 */
#define TLP_FMTTYPE_CFGWR1              0x45    /* Configuration Write Type 1 */
#define TLP_PAYLOAD_SIZE                0x01
#define TLP_READ_TAG                    0x1d
#define TLP_WRITE_TAG                   0x10
#define RP_DEVFN                        0
#define TLP_REQ_ID(bus, devfn)          (((bus) << 8) | (devfn))
#define TLP_CFGRD_DW0(pcie, bus)                                        \
        ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0              \
                                      : TLP_FMTTYPE_CFGRD1) << 24) |    \
         TLP_PAYLOAD_SIZE)
#define TLP_CFGWR_DW0(pcie, bus)                                        \
        ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGWR0              \
                                      : TLP_FMTTYPE_CFGWR1) << 24) |    \
         TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be)                                      \
        (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset)                                 \
        (((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s)              (((s) >> 13) & 7)
#define TLP_HDR_SIZE                    3
#define TLP_LOOP                        500

#define LINK_UP_TIMEOUT                 HZ
#define LINK_RETRAIN_TIMEOUT            HZ
#define DWORD_MASK                      3

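/*
 * For example, with the root port on bus 0, a full-dword configuration
 * read of offset 0x10 at 01:00.0 is encoded by the macros above as:
 *
 *   DW0 = TLP_CFGRD_DW0(pcie, 0x01)             = 0x05000001
 *   DW1 = TLP_CFG_DW1(pcie, TLP_READ_TAG, 0xf)  = 0x00001d0f
 *   DW2 = TLP_CFG_DW2(0x01, 0x00, 0x10)         = 0x01000010
 *
 * i.e. a Type 1 config read (the target bus differs from the root bus),
 * requester ID 00:00.0, tag 0x1d, all four byte enables set.
 */
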
struct altera_pcie {
        struct platform_device  *pdev;
        void __iomem            *cra_base;      /* DT Cra */
        int                     irq;
        u8                      root_bus_nr;
        struct irq_domain       *irq_domain;
        struct resource         bus_range;
        struct list_head        resources;
};

struct tlp_rp_regpair_t {
        u32 ctrl;
        u32 reg0;
        u32 reg1;
};

static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
                              const u32 reg)
{
        writel_relaxed(value, pcie->cra_base + reg);
}

static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
{
        return readl_relaxed(pcie->cra_base + reg);
}

static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
        return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}

/*
 * The Altera PCIe port uses BAR0 of the Root Complex's configuration space
 * as the translation window from PCI bus addresses to native bus addresses.
 * The entire DDR region is mapped into PCIe space through these registers,
 * so it can be reached by DMA from endpoint devices. BAR0 is also the path
 * by which incoming MSI/MSI-X writes from endpoints reach the MSI vector
 * registers and ultimately raise an interrupt on the GIC. The bridge's BAR0
 * must therefore be hidden during enumeration so the PCI core does not size
 * it or assign resources to it.
 */
static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
                                    int offset)
{
        if (pci_is_root_bus(bus) && (devfn == 0) &&
            (offset == PCI_BASE_ADDRESS_0))
                return true;

        return false;
}

static void tlp_write_tx(struct altera_pcie *pcie,
                         struct tlp_rp_regpair_t *tlp_rp_regdata)
{
        cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0);
        cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1);
        cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}

static bool altera_pcie_valid_device(struct altera_pcie *pcie,
                                     struct pci_bus *bus, int dev)
{
        /* If there is no link, then there is no device */
        if (bus->number != pcie->root_bus_nr) {
                if (!altera_pcie_link_up(pcie))
                        return false;
        }

        /* access only one slot on each root port */
        if (bus->number == pcie->root_bus_nr && dev > 0)
                return false;

        return true;
}

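/*
 * Drain one completion TLP from the root port's RX completion registers
 * (RP_RXCPL_*). Each loop iteration pops one dword pair: the pair flagged
 * SOP carries the completion status in its second dword, and the pair
 * flagged EOP carries the read data (if any) in its first dword. Any status
 * other than Successful Completion is reported as PCIBIOS_DEVICE_NOT_FOUND.
 */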
static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
{
        int i;
        bool sop = false;
        u32 ctrl;
        u32 reg0, reg1;
        u32 comp_status = 1;

        /*
         * Minimum 2 loops to read TLP headers and 1 loop to read data
         * payload.
         */
        for (i = 0; i < TLP_LOOP; i++) {
                ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
                if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
                        reg0 = cra_readl(pcie, RP_RXCPL_REG0);
                        reg1 = cra_readl(pcie, RP_RXCPL_REG1);

                        if (ctrl & RP_RXCPL_SOP) {
                                sop = true;
                                comp_status = TLP_COMP_STATUS(reg1);
                        }

                        if (ctrl & RP_RXCPL_EOP) {
                                if (comp_status)
                                        return PCIBIOS_DEVICE_NOT_FOUND;

                                if (value)
                                        *value = reg0;

                                return PCIBIOS_SUCCESSFUL;
                        }
                }
                udelay(5);
        }

        return PCIBIOS_DEVICE_NOT_FOUND;
}

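/*
 * Push a TLP into the root port TX registers, two dwords per beat. The
 * first beat is marked SOP and the last beat EOP. For Qword-aligned config
 * offsets ("align" set) the data dword must start a new beat, so the third
 * header dword goes out on its own and the payload follows in a final beat;
 * otherwise the third header dword and the payload share the EOP beat.
 */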
static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
                             u32 data, bool align)
{
        struct tlp_rp_regpair_t tlp_rp_regdata;

        tlp_rp_regdata.reg0 = headers[0];
        tlp_rp_regdata.reg1 = headers[1];
        tlp_rp_regdata.ctrl = RP_TX_SOP;
        tlp_write_tx(pcie, &tlp_rp_regdata);

        if (align) {
                tlp_rp_regdata.reg0 = headers[2];
                tlp_rp_regdata.reg1 = 0;
                tlp_rp_regdata.ctrl = 0;
                tlp_write_tx(pcie, &tlp_rp_regdata);

                tlp_rp_regdata.reg0 = data;
                tlp_rp_regdata.reg1 = 0;
        } else {
                tlp_rp_regdata.reg0 = headers[2];
                tlp_rp_regdata.reg1 = data;
        }

        tlp_rp_regdata.ctrl = RP_TX_EOP;
        tlp_write_tx(pcie, &tlp_rp_regdata);
}

static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
                              int where, u8 byte_en, u32 *value)
{
        u32 headers[TLP_HDR_SIZE];

        headers[0] = TLP_CFGRD_DW0(pcie, bus);
        headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
        headers[2] = TLP_CFG_DW2(bus, devfn, where);

        tlp_write_packet(pcie, headers, 0, false);

        return tlp_read_packet(pcie, value);
}

static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
                               int where, u8 byte_en, u32 value)
{
        u32 headers[TLP_HDR_SIZE];
        int ret;

        headers[0] = TLP_CFGWR_DW0(pcie, bus);
        headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
        headers[2] = TLP_CFG_DW2(bus, devfn, where);

        /* check alignment to Qword */
        if ((where & 0x7) == 0)
                tlp_write_packet(pcie, headers, value, true);
        else
                tlp_write_packet(pcie, headers, value, false);

        ret = tlp_read_packet(pcie, NULL);
        if (ret != PCIBIOS_SUCCESSFUL)
                return ret;

        /*
         * Monitor changes to PCI_PRIMARY_BUS register on root port
         * and update local copy of root bus number accordingly.
         */
        if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS))
                pcie->root_bus_nr = (u8)(value);

        return PCIBIOS_SUCCESSFUL;
}

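/*
 * All config accesses go out as single-dword TLPs, so sub-dword reads are
 * widened to the enclosing dword and narrowed again with byte enables and
 * shifts. For example, a 2-byte read at offset 0x06 issues a dword read at
 * offset 0x04 with byte_en = 0xc and returns (data >> 16) & 0xffff.
 */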
static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
                                 unsigned int devfn, int where, int size,
                                 u32 *value)
{
        int ret;
        u32 data;
        u8 byte_en;

        switch (size) {
        case 1:
                byte_en = 1 << (where & 3);
                break;
        case 2:
                byte_en = 3 << (where & 3);
                break;
        default:
                byte_en = 0xf;
                break;
        }

        ret = tlp_cfg_dword_read(pcie, busno, devfn,
                                 (where & ~DWORD_MASK), byte_en, &data);
        if (ret != PCIBIOS_SUCCESSFUL)
                return ret;

        switch (size) {
        case 1:
                *value = (data >> (8 * (where & 0x3))) & 0xff;
                break;
        case 2:
                *value = (data >> (8 * (where & 0x2))) & 0xffff;
                break;
        default:
                *value = data;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}

static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
                                  unsigned int devfn, int where, int size,
                                  u32 value)
{
        u32 data32;
        u32 shift = 8 * (where & 3);
        u8 byte_en;

        switch (size) {
        case 1:
                data32 = (value & 0xff) << shift;
                byte_en = 1 << (where & 3);
                break;
        case 2:
                data32 = (value & 0xffff) << shift;
                byte_en = 3 << (where & 3);
                break;
        default:
                data32 = value;
                byte_en = 0xf;
                break;
        }

        return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
                                   byte_en, data32);
}

static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
                                int where, int size, u32 *value)
{
        struct altera_pcie *pcie = bus->sysdata;

        if (altera_pcie_hide_rc_bar(bus, devfn, where))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
                *value = 0xffffffff;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
                                     value);
}

static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 value)
{
        struct altera_pcie *pcie = bus->sysdata;

        if (altera_pcie_hide_rc_bar(bus, devfn, where))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
                return PCIBIOS_DEVICE_NOT_FOUND;

        return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
                                      value);
}

static struct pci_ops altera_pcie_ops = {
        .read = altera_pcie_cfg_read,
        .write = altera_pcie_cfg_write,
};

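/*
 * Helpers to read/write 16-bit registers in the root port's PCIe capability
 * (at PCIE_CAP_OFFSET) through the local config-access path above. They are
 * used for the link retraining done before the bus has been enumerated.
 */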
static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
                                unsigned int devfn, int offset, u16 *value)
{
        u32 data;
        int ret;

        ret = _altera_pcie_cfg_read(pcie, busno, devfn,
                                    PCIE_CAP_OFFSET + offset, sizeof(*value),
                                    &data);
        *value = data;
        return ret;
}

static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
                                 unsigned int devfn, int offset, u16 value)
{
        return _altera_pcie_cfg_write(pcie, busno, devfn,
                                      PCIE_CAP_OFFSET + offset, sizeof(value),
                                      value);
}

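/*
 * After PCI_EXP_LNKCTL_RL has been set, poll in two phases: first until the
 * Link Training bit in the Link Status register clears (or
 * LINK_RETRAIN_TIMEOUT expires), then until the LTSSM reports L0 (or
 * LINK_UP_TIMEOUT expires). Timeouts are logged but not treated as fatal.
 */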
static void altera_wait_link_retrain(struct altera_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        u16 reg16;
        unsigned long start_jiffies;

        /* Wait for link training to end. */
        start_jiffies = jiffies;
        for (;;) {
                altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
                                     PCI_EXP_LNKSTA, &reg16);
                if (!(reg16 & PCI_EXP_LNKSTA_LT))
                        break;

                if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
                        dev_err(dev, "link retrain timeout\n");
                        break;
                }
                udelay(100);
        }

        /* Wait for the link to come up. */
        start_jiffies = jiffies;
        for (;;) {
                if (altera_pcie_link_up(pcie))
                        break;

                if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
                        dev_err(dev, "link up timeout\n");
                        break;
                }
                udelay(100);
        }
}

static void altera_pcie_retrain(struct altera_pcie *pcie)
{
        u16 linkcap, linkstat, linkctl;

        if (!altera_pcie_link_up(pcie))
                return;

        /*
         * Set the retrain bit if the root port supports a link speed above
         * 2.5 GT/s but the link is currently running at 2.5 GT/s.
         */
        altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
                             &linkcap);
        if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
                return;

        altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
                             &linkstat);
        if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
                altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
                                     PCI_EXP_LNKCTL, &linkctl);
                linkctl |= PCI_EXP_LNKCTL_RL;
                altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
                                      PCI_EXP_LNKCTL, linkctl);

                altera_wait_link_retrain(pcie);
        }
}

static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
                                irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
        irq_set_chip_data(irq, domain->host_data);
        return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
        .map = altera_pcie_intx_map,
        .xlate = pci_irqd_intx_xlate,
};

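/*
 * Chained handler for the root port's INTx lines. P2A_INT_STATUS holds one
 * bit per legacy interrupt; each set bit is acknowledged by writing it back
 * and then forwarded to the virtual IRQ mapped in the INTx domain. The
 * status register is re-read until no enabled bits remain set.
 */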
static void altera_pcie_isr(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct altera_pcie *pcie;
        struct device *dev;
        unsigned long status;
        u32 bit;
        u32 virq;

        chained_irq_enter(chip, desc);
        pcie = irq_desc_get_handler_data(desc);
        dev = &pcie->pdev->dev;

        while ((status = cra_readl(pcie, P2A_INT_STATUS)
                & P2A_INT_STS_ALL) != 0) {
                for_each_set_bit(bit, &status, PCI_NUM_INTX) {
                        /* clear interrupts */
                        cra_writel(pcie, 1 << bit, P2A_INT_STATUS);

                        virq = irq_find_mapping(pcie->irq_domain, bit);
                        if (virq)
                                generic_handle_irq(virq);
                        else
                                dev_err(dev, "unexpected IRQ, INT%d\n", bit);
                }
        }

        chained_irq_exit(chip, desc);
}

static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
{
        int err, res_valid = 0;
        struct device *dev = &pcie->pdev->dev;
        struct resource_entry *win;

        err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
                                                    &pcie->resources, NULL);
        if (err)
                return err;

        err = devm_request_pci_bus_resources(dev, &pcie->resources);
        if (err)
                goto out_release_res;

        resource_list_for_each_entry(win, &pcie->resources) {
                struct resource *res = win->res;

                if (resource_type(res) == IORESOURCE_MEM)
                        res_valid |= !(res->flags & IORESOURCE_PREFETCH);
        }

        if (res_valid)
                return 0;

        dev_err(dev, "non-prefetchable memory resource required\n");
        err = -EINVAL;

out_release_res:
        pci_free_resource_list(&pcie->resources);
        return err;
}

static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct device_node *node = dev->of_node;

        /* Setup INTx */
        pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
                                                 &intx_domain_ops, pcie);
        if (!pcie->irq_domain) {
                dev_err(dev, "Failed to get an INTx IRQ domain\n");
                return -ENOMEM;
        }

        return 0;
}

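/*
 * Only two things are consumed from the device tree node here: the memory
 * resource named "Cra" (the register window behind cra_readl()/cra_writel())
 * and the first interrupt, which becomes the chained parent IRQ for the INTx
 * demultiplexer above. A minimal sketch of such a node, with placeholder
 * addresses and interrupt specifiers (the full binding also describes the
 * TX slave window, ranges and interrupt-map, omitted here):
 *
 *      pcie: pcie@ff220000 {
 *              compatible = "altr,pcie-root-port-1.0";
 *              reg = <0xff220000 0x4000>;
 *              reg-names = "Cra";
 *              interrupts = <0 40 4>;
 *              device_type = "pci";
 *              ...
 *      };
 */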
static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct platform_device *pdev = pcie->pdev;
        struct resource *cra;

        cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
        pcie->cra_base = devm_ioremap_resource(dev, cra);
        if (IS_ERR(pcie->cra_base))
                return PTR_ERR(pcie->cra_base);

        /* setup IRQ */
        pcie->irq = platform_get_irq(pdev, 0);
        if (pcie->irq < 0) {
                dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
                return pcie->irq;
        }

        irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
        return 0;
}

static void altera_pcie_host_init(struct altera_pcie *pcie)
{
        altera_pcie_retrain(pcie);
}

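/*
 * Probe wiring: map the CRA window and install the chained INTx handler,
 * collect the bridge windows from the DT ranges, create the INTx domain,
 * then clear and enable the P2A interrupt sources and retrain the link
 * before scanning the root bus and assigning resources.
 */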
static int altera_pcie_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct altera_pcie *pcie;
        struct pci_bus *bus;
        struct pci_bus *child;
        struct pci_host_bridge *bridge;
        int ret;

        bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
        if (!bridge)
                return -ENOMEM;

        pcie = pci_host_bridge_priv(bridge);
        pcie->pdev = pdev;

        ret = altera_pcie_parse_dt(pcie);
        if (ret) {
                dev_err(dev, "Parsing DT failed\n");
                return ret;
        }

        INIT_LIST_HEAD(&pcie->resources);

        ret = altera_pcie_parse_request_of_pci_ranges(pcie);
        if (ret) {
                dev_err(dev, "Failed to add resources\n");
                return ret;
        }

        ret = altera_pcie_init_irq_domain(pcie);
        if (ret) {
                dev_err(dev, "Failed creating IRQ Domain\n");
                return ret;
        }

        /* clear all interrupts */
        cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
        /* enable all interrupts */
        cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
        altera_pcie_host_init(pcie);

        list_splice_init(&pcie->resources, &bridge->windows);
        bridge->dev.parent = dev;
        bridge->sysdata = pcie;
        bridge->busnr = pcie->root_bus_nr;
        bridge->ops = &altera_pcie_ops;
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;

        ret = pci_scan_root_bus_bridge(bridge);
        if (ret < 0)
                return ret;

        bus = bridge->bus;

        pci_assign_unassigned_bus_resources(bus);

        /* Configure PCI Express settings. */
        list_for_each_entry(child, &bus->children, node)
                pcie_bus_configure_settings(child);

        pci_bus_add_devices(bus);
        return ret;
}

static const struct of_device_id altera_pcie_of_match[] = {
        { .compatible = "altr,pcie-root-port-1.0", },
        {},
};

static struct platform_driver altera_pcie_driver = {
        .probe          = altera_pcie_probe,
        .driver = {
                .name   = "altera-pcie",
                .of_match_table = altera_pcie_of_match,
                .suppress_bind_attrs = true,
        },
};
builtin_platform_driver(altera_pcie_driver);