pci-mvebu.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PCIe driver for Marvell Armada 370 and Armada XP SoCs
  4. *
  5. * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/pci.h>
  10. #include <linux/bitfield.h>
  11. #include <linux/clk.h>
  12. #include <linux/delay.h>
  13. #include <linux/gpio/consumer.h>
  14. #include <linux/init.h>
  15. #include <linux/irqchip/chained_irq.h>
  16. #include <linux/irqdomain.h>
  17. #include <linux/mbus.h>
  18. #include <linux/slab.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/of_address.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_pci.h>
  23. #include <linux/of_platform.h>
  24. #include "../pci.h"
  25. #include "../pci-bridge-emul.h"
  26. /*
  27. * PCIe unit register offsets.
  28. */
  29. #define PCIE_DEV_ID_OFF 0x0000
  30. #define PCIE_CMD_OFF 0x0004
  31. #define PCIE_DEV_REV_OFF 0x0008
  32. #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
  33. #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
  34. #define PCIE_SSDEV_ID_OFF 0x002c
  35. #define PCIE_CAP_PCIEXP 0x0060
  36. #define PCIE_CAP_PCIERR_OFF 0x0100
  37. #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
  38. #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
  39. #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
  40. #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
  41. #define PCIE_WIN5_CTRL_OFF 0x1880
  42. #define PCIE_WIN5_BASE_OFF 0x1884
  43. #define PCIE_WIN5_REMAP_OFF 0x188c
  44. #define PCIE_CONF_ADDR_OFF 0x18f8
  45. #define PCIE_CONF_ADDR_EN 0x80000000
  46. #define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
  47. #define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
  48. #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
  49. #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
  50. #define PCIE_CONF_ADDR(bus, devfn, where) \
  51. (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
  52. PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
  53. PCIE_CONF_ADDR_EN)
  54. #define PCIE_CONF_DATA_OFF 0x18fc
  55. #define PCIE_INT_CAUSE_OFF 0x1900
  56. #define PCIE_INT_UNMASK_OFF 0x1910
  57. #define PCIE_INT_INTX(i) BIT(24+i)
  58. #define PCIE_INT_PM_PME BIT(28)
  59. #define PCIE_INT_ALL_MASK GENMASK(31, 0)
  60. #define PCIE_CTRL_OFF 0x1a00
  61. #define PCIE_CTRL_X1_MODE 0x0001
  62. #define PCIE_CTRL_RC_MODE BIT(1)
  63. #define PCIE_CTRL_MASTER_HOT_RESET BIT(24)
  64. #define PCIE_STAT_OFF 0x1a04
  65. #define PCIE_STAT_BUS 0xff00
  66. #define PCIE_STAT_DEV 0x1f0000
  67. #define PCIE_STAT_LINK_DOWN BIT(0)
  68. #define PCIE_SSPL_OFF 0x1a0c
  69. #define PCIE_SSPL_VALUE_SHIFT 0
  70. #define PCIE_SSPL_VALUE_MASK GENMASK(7, 0)
  71. #define PCIE_SSPL_SCALE_SHIFT 8
  72. #define PCIE_SSPL_SCALE_MASK GENMASK(9, 8)
  73. #define PCIE_SSPL_ENABLE BIT(16)
  74. #define PCIE_RC_RTSTA 0x1a14
  75. #define PCIE_DEBUG_CTRL 0x1a60
  76. #define PCIE_DEBUG_SOFT_RESET BIT(20)
struct mvebu_pcie_port;

/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;	/* array with one entry per port */
	struct resource io;		/* PCI I/O aperture (bus addresses) */
	struct resource realio;		/* CPU-side I/O aperture — set up outside this chunk, confirm */
	struct resource mem;		/* PCI memory aperture */
	int nports;			/* number of entries in 'ports' */
};

/*
 * One MBus address decode window as currently programmed.
 * A size of 0 means the window is not mapped.
 */
struct mvebu_pcie_window {
	phys_addr_t base;	/* CPU physical base address */
	phys_addr_t remap;	/* remap target, or MVEBU_MBUS_NO_REMAP */
	size_t size;		/* window length in bytes */
};

/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;		/* mapped PCIe unit registers */
	u32 port;			/* controller port index */
	u32 lane;			/* SerDes lane index */
	bool is_x4;			/* true when the port uses 4 lanes */
	int devfn;
	unsigned int mem_target;	/* MBus target for memory windows */
	unsigned int mem_attr;		/* MBus attribute for memory windows */
	unsigned int io_target;		/* MBus target for I/O windows; -1 = no I/O support */
	unsigned int io_attr;		/* MBus attribute for I/O windows; -1 = no I/O support */
	struct clk *clk;
	struct gpio_desc *reset_gpio;	/* optional port reset GPIO */
	char *reset_name;
	struct pci_bridge_emul bridge;	/* emulated root port config space */
	struct device_node *dn;
	struct mvebu_pcie *pcie;	/* back-pointer to the host structure */
	struct mvebu_pcie_window memwin;	/* currently mapped memory window */
	struct mvebu_pcie_window iowin;		/* currently mapped I/O window */
	u32 saved_pcie_stat;		/* PCIE_STAT_OFF snapshot — presumably for suspend/resume, used outside this chunk */
	struct resource regs;		/* physical range of the unit registers */
	u8 slot_power_limit_value;	/* 0 when no slot power limit was given in DT */
	u8 slot_power_limit_scale;
	struct irq_domain *intx_irq_domain;
	raw_spinlock_t irq_lock;
	int intx_irq;			/* optional "intx" IRQ from DT; <= 0 when absent */
};
/* Write a 32-bit value to a per-port PCIe unit register. */
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}
/* Read a 32-bit value from a per-port PCIe unit register. */
static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}
  128. static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
  129. {
  130. return port->io_target != -1 && port->io_attr != -1;
  131. }
  132. static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
  133. {
  134. return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
  135. }
  136. static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
  137. {
  138. return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
  139. }
  140. static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
  141. {
  142. u32 stat;
  143. stat = mvebu_readl(port, PCIE_STAT_OFF);
  144. stat &= ~PCIE_STAT_BUS;
  145. stat |= nr << 8;
  146. mvebu_writel(port, stat, PCIE_STAT_OFF);
  147. }
  148. static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
  149. {
  150. u32 stat;
  151. stat = mvebu_readl(port, PCIE_STAT_OFF);
  152. stat &= ~PCIE_STAT_DEV;
  153. stat |= nr << 16;
  154. mvebu_writel(port, stat, PCIE_STAT_OFF);
  155. }
  156. static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
  157. {
  158. int i;
  159. mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
  160. mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
  161. for (i = 1; i < 3; i++) {
  162. mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
  163. mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
  164. mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
  165. }
  166. for (i = 0; i < 5; i++) {
  167. mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
  168. mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
  169. mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
  170. }
  171. mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
  172. mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
  173. mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
  174. }
/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0] -> internal registers (needed for MSI)
 * BAR[1] -> covers all DRAM banks
 * BAR[2] -> Disabled
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	mvebu_pcie_disable_wins(port);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		/* Base and size are 64 KiB granular (low 16 bits masked). */
		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		/* Control word: size | attribute | target | enable (bit 0). */
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	/* (size - 1) encodes the BAR length; bit 0 enables the BAR. */
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] to the device's internal registers.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
/*
 * One-time hardware setup of a port: switch the controller to Root
 * Complex mode, fix up registers that the HW exposes with
 * non-compliant defaults, program the DRAM decode windows and the
 * slot power limit, and finally mask/clear all interrupt sources.
 */
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;

	/* Setup PCIe controller to Root Complex mode. */
	ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
	ctrl |= PCIE_CTRL_RC_MODE;
	mvebu_writel(port, ctrl, PCIE_CTRL_OFF);

	/*
	 * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link
	 * Capability register. This register is defined by PCIe specification
	 * as read-only but this mvebu controller has it as read-write and must
	 * be set to number of SerDes PCIe lanes (1 or 4). If this register is
	 * not set correctly then link with endpoint card is not established.
	 */
	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);

	/* Disable Root Bridge I/O space, memory space and bus mastering. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/*
	 * Change Class Code of PCI Bridge device to PCI Bridge (0x6004)
	 * because default value is Memory controller (0x5080).
	 *
	 * Note that this mvebu PCI Bridge does not have compliant Type 1
	 * Configuration Space. Header Type is reported as Type 0 and it
	 * has format of Type 0 config space.
	 *
	 * Moreover Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
	 * have the same format in Marvell's specification as in PCIe
	 * specification, but their meaning is totally different and they do
	 * different things: they are aliased into internal mvebu registers
	 * (e.g. PCIE_BAR_LO_OFF) and these should not be changed or
	 * reconfigured by pci device drivers.
	 *
	 * Therefore driver uses emulation of PCI Bridge which emulates
	 * access to configuration space via internal mvebu registers or
	 * emulated configuration buffer. Driver access these PCI Bridge
	 * directly for simplification, but these registers can be accessed
	 * also via standard mvebu way for accessing PCI config space.
	 */
	dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	dev_rev &= ~0xffffff00;		/* keep only the revision byte */
	dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/*
	 * Program Root Port to automatically send Set_Slot_Power_Limit
	 * PCIe Message when changing status from Dl_Down to Dl_Up and valid
	 * slot power limit was specified.
	 */
	sspl = mvebu_readl(port, PCIE_SSPL_OFF);
	sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
	if (port->slot_power_limit_value) {
		sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
		sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
		sspl |= PCIE_SSPL_ENABLE;
	}
	mvebu_writel(port, sspl, PCIE_SSPL_OFF);

	/* Mask all interrupt sources. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

	/* Clear all interrupt causes. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

	/* Check if "intx" interrupt was specified in DT. */
	if (port->intx_irq > 0)
		return;

	/*
	 * Fallback code when "intx" interrupt was not specified in DT:
	 * Unmask all legacy INTx interrupts as driver does not provide a way
	 * for masking and unmasking of individual legacy INTx interrupts.
	 * Legacy INTx are reported via one shared GIC source and therefore
	 * kernel cannot distinguish which individual legacy INTx was triggered.
	 * These interrupts are shared, so it should not cause any issue. Just
	 * performance penalty as every PCIe interrupt handler needs to be
	 * called when some interrupt is triggered.
	 */
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
		  PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
}
static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
						    struct pci_bus *bus,
						    int devfn);

/*
 * Read config space of a device behind one of the root ports.
 * The PCIe unit uses an indirect address/data register pair: the
 * bus/devfn/register triple is programmed into PCIE_CONF_ADDR_OFF and
 * the value is then read through PCIE_CONF_DATA_OFF.
 */
static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
				    int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* No device can respond while the link is down. */
	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Select the target config register, then read the data port. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write config space of a device behind one of the root ports, using
 * the same indirect PCIE_CONF_ADDR_OFF / PCIE_CONF_DATA_OFF pair as
 * mvebu_pcie_child_rd_conf().
 */
static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
				    int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* No device can respond while the link is down. */
	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Select the target config register, then write the data port. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
/* Config accessors for devices on buses below the root ports. */
static struct pci_ops mvebu_pcie_child_ops = {
	.read = mvebu_pcie_child_rd_conf,
	.write = mvebu_pcie_child_wr_conf,
};
  367. /*
  368. * Remove windows, starting from the largest ones to the smallest
  369. * ones.
  370. */
  371. static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
  372. phys_addr_t base, size_t size)
  373. {
  374. while (size) {
  375. size_t sz = 1 << (fls(size) - 1);
  376. mvebu_mbus_del_window(base, sz);
  377. base += sz;
  378. size -= sz;
  379. }
  380. }
/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e highest order bit set in the size).
 *
 * Returns 0 on success; on failure all windows created so far are
 * removed again and the mvebu_mbus error code is returned.
 */
static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  phys_addr_t base, size_t size,
				  phys_addr_t remap)
{
	/* Bytes mapped so far; needed to unwind on failure. */
	size_t size_mapped = 0;

	while (size) {
		/* Largest power-of-two chunk fitting in what is left. */
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			/* 'base' has advanced; step back to the first window. */
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return ret;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		/* Keep the remap target in lockstep with the base address. */
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}

	return 0;
}
  415. static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
  416. unsigned int target, unsigned int attribute,
  417. const struct mvebu_pcie_window *desired,
  418. struct mvebu_pcie_window *cur)
  419. {
  420. int ret;
  421. if (desired->base == cur->base && desired->remap == cur->remap &&
  422. desired->size == cur->size)
  423. return 0;
  424. if (cur->size != 0) {
  425. mvebu_pcie_del_windows(port, cur->base, cur->size);
  426. cur->size = 0;
  427. cur->base = 0;
  428. /*
  429. * If something tries to change the window while it is enabled
  430. * the change will not be done atomically. That would be
  431. * difficult to do in the general case.
  432. */
  433. }
  434. if (desired->size == 0)
  435. return 0;
  436. ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
  437. desired->size, desired->remap);
  438. if (ret) {
  439. cur->size = 0;
  440. cur->base = 0;
  441. return ret;
  442. }
  443. *cur = *desired;
  444. return 0;
  445. }
/*
 * Re-program the port's I/O decode window from the emulated bridge's
 * I/O base/limit registers; called whenever those registers change.
 */
static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
		/* Yes: remove the window ('desired' is all-zero). */
		return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
					     &desired, &port->iowin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(le16_to_cpu(conf->iobaseupper) << 16);
	desired.base = port->pcie->io.start + desired.remap;
	/* Limit is 4K-granular; the low 12 bits of the end are all-ones. */
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (le16_to_cpu(conf->iolimitupper) << 16)) -
			desired.remap) +
		       1;

	return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
				     &port->iowin);
}
/*
 * Re-program the port's memory decode window from the emulated
 * bridge's memory base/limit registers; called whenever they change.
 * Memory windows are identity-mapped, hence MVEBU_MBUS_NO_REMAP.
 */
static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
		/* Yes: remove the window (desired size is 0). */
		return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
					     &desired, &port->memwin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
	/* Limit is 1M-granular; the low 20 bits of the end are all-ones. */
	desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
				     &port->memwin);
}
/*
 * Read hook for the emulated bridge's base (type 1) config space.
 * Registers that are backed by real HW state are read from the PCIe
 * unit; everything else is left to the pci-bridge-emul layer
 * (PCI_BRIDGE_EMUL_NOT_HANDLED).
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		*value = mvebu_readl(port, PCIE_CMD_OFF);
		break;

	case PCI_PRIMARY_BUS: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * secondary bus number which is mvebu local bus number.
		 * Other bits are retrieved only from emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
		val &= ~0xff00;
		val |= mvebu_pcie_get_local_bus_nr(port) << 8;
		*value = val;
		break;
	}

	case PCI_INTERRUPT_LINE: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * one bit: PCI_BRIDGE_CTL_BUS_RESET.
		 * Other bits are retrieved only from emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
		if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
		else
			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
		*value = val;
		break;
	}

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
/*
 * Read hook for the emulated bridge's PCIe capability registers.
 * Most registers are passed through to the HW capability block at
 * PCIE_CAP_PCIEXP, with fixups where the HW is non-compliant.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires that the Clock Power Management capability bit
		 * is hard-wired to zero for downstream ports but HW returns 1.
		 * Additionally enable Data Link Layer Link Active Reporting
		 * Capable bit as DL_Active indication is provided too.
		 */
		*value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			  ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
		break;

	case PCI_EXP_LNKCTL:
		/* DL_Active indication is provided via PCIE_STAT_OFF */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
			 (mvebu_pcie_link_up(port) ?
			  (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
		break;

	case PCI_EXP_SLTCTL: {
		u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
		u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
		u32 val = 0;
		/*
		 * When slot power limit was not specified in DT then
		 * ASPL_DISABLE bit is stored only in emulated config space.
		 * Otherwise reflect status of PCIE_SSPL_ENABLE bit in HW.
		 */
		if (!port->slot_power_limit_value)
			val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
		else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
			val |= PCI_EXP_SLTCTL_ASPL_DISABLE;
		/* This callback is 32-bit and in high bits is slot status. */
		val |= slotsta << 16;
		*value = val;
		break;
	}

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	case PCI_EXP_DEVCAP2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
		break;

	case PCI_EXP_DEVCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
/*
 * Read hook for the emulated bridge's extended (AER) capability:
 * the listed registers are read straight from the HW error block at
 * PCIE_CAP_PCIERR_OFF. 'case 0' is the capability header itself.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case 0:
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
/*
 * Write hook for the emulated bridge's base config space: propagate
 * writes with a HW side effect (command register, I/O and memory
 * windows, secondary bus number, secondary bus reset) to the PCIe
 * unit. When window programming fails, the emulated base/limit
 * registers are rewritten so that base > limit, which keeps the
 * range disabled.
 */
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
		mvebu_writel(port, new, PCIE_CMD_OFF);
		break;

	case PCI_IO_BASE:
		if ((mask & 0xffff) && mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_MEMORY_BASE:
		if (mvebu_pcie_handle_membase_change(port)) {
			/* On error disable mem range */
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
			conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
		}
		break;

	case PCI_IO_BASE_UPPER16:
		if (mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_PRIMARY_BUS:
		/* Only the secondary bus number byte is mirrored to HW. */
		if (mask & 0xff00)
			mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	case PCI_INTERRUPT_LINE:
		/* Bridge control lives in the high 16 bits of this dword. */
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
			else
				ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
			mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
		}
		break;

	default:
		break;
	}
}
/*
 * Write hook for the emulated bridge's PCIe capability registers:
 * forward writes to the HW capability block at PCIE_CAP_PCIEXP, with
 * fixups where the HW is non-compliant.
 */
static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * PCIe requires that the Enable Clock Power Management bit
		 * is hard-wired to zero for downstream ports but HW allows
		 * to change it.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/*
		 * Allow to change PCIE_SSPL_ENABLE bit only when slot power
		 * limit was specified in DT and configured into HW.
		 */
		if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
		    port->slot_power_limit_value) {
			u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
			if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
				sspl &= ~PCIE_SSPL_ENABLE;
			else
				sspl |= PCIE_SSPL_ENABLE;
			mvebu_writel(port, sspl, PCIE_SSPL_OFF);
		}
		break;

	case PCI_EXP_RTSTA:
		/*
		 * PME Status bit in Root Status Register (PCIE_RC_RTSTA)
		 * is read-only and can be cleared only by writing 0b to the
		 * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So
		 * clear PME via Interrupt Cause.
		 */
		if (new & PCI_EXP_RTSTA_PME)
			mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
		break;

	case PCI_EXP_DEVCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		break;
	}
}
  735. static void
  736. mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
  737. int reg, u32 old, u32 new, u32 mask)
  738. {
  739. struct mvebu_pcie_port *port = bridge->data;
  740. switch (reg) {
  741. /* These are W1C registers, so clear other bits */
  742. case PCI_ERR_UNCOR_STATUS:
  743. case PCI_ERR_COR_STATUS:
  744. case PCI_ERR_ROOT_STATUS:
  745. new &= mask;
  746. fallthrough;
  747. case PCI_ERR_UNCOR_MASK:
  748. case PCI_ERR_UNCOR_SEVER:
  749. case PCI_ERR_COR_MASK:
  750. case PCI_ERR_CAP:
  751. case PCI_ERR_HEADER_LOG+0:
  752. case PCI_ERR_HEADER_LOG+4:
  753. case PCI_ERR_HEADER_LOG+8:
  754. case PCI_ERR_HEADER_LOG+12:
  755. case PCI_ERR_ROOT_COMMAND:
  756. case PCI_ERR_ROOT_ERR_SRC:
  757. mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
  758. break;
  759. default:
  760. break;
  761. }
  762. }
/* Accessors wired into the generic PCI bridge emulation core. */
static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.read_base = mvebu_pci_bridge_emul_base_conf_read,
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
	.read_ext = mvebu_pci_bridge_emul_ext_conf_read,
	.write_ext = mvebu_pci_bridge_emul_ext_conf_write,
};
/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 *
 * Returns 0 on success or a negative errno from pci_bridge_emul_init().
 */
static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
	struct pci_bridge_emul *bridge = &port->bridge;
	u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
	u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	/* Mirror the hardware identification into the emulated header. */
	bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
	bridge->conf.device = cpu_to_le16(dev_id >> 16);
	bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);

	if (mvebu_has_ioport(port)) {
		/* We support 32 bits I/O addressing */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	} else {
		bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
	}

	/*
	 * Older mvebu hardware provides PCIe Capability structure only in
	 * version 1. New hardware provides it in version 2.
	 * Enable slot support which is emulated.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);

	/*
	 * Set Presence Detect State bit permanently as there is no support for
	 * unplugging PCIe card from the slot. Assume that PCIe card is always
	 * connected in slot.
	 *
	 * Set physical slot number to port+1 as mvebu ports are indexed from
	 * zero and zero value is reserved for ports within the same silicon
	 * as Root Port which is not mvebu case.
	 *
	 * Also set correct slot power limit.
	 */
	bridge->pcie_conf.slotcap = cpu_to_le32(
		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
		FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

	bridge->subsystem_vendor_id = ssdev_id & 0xffff;
	bridge->subsystem_id = ssdev_id >> 16;
	bridge->has_pcie = true;
	bridge->pcie_start = PCIE_CAP_PCIEXP;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, bridge_flags);
}
/* Retrieve the driver-private mvebu_pcie from ARM's pci_sys_data. */
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
  828. static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
  829. struct pci_bus *bus,
  830. int devfn)
  831. {
  832. int i;
  833. for (i = 0; i < pcie->nports; i++) {
  834. struct mvebu_pcie_port *port = &pcie->ports[i];
  835. if (!port->base)
  836. continue;
  837. if (bus->number == 0 && port->devfn == devfn)
  838. return port;
  839. if (bus->number != 0 &&
  840. bus->number >= port->bridge.conf.secondary_bus &&
  841. bus->number <= port->bridge.conf.subordinate_bus)
  842. return port;
  843. }
  844. return NULL;
  845. }
  846. /* PCI configuration space write function */
  847. static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
  848. int where, int size, u32 val)
  849. {
  850. struct mvebu_pcie *pcie = bus->sysdata;
  851. struct mvebu_pcie_port *port;
  852. port = mvebu_pcie_find_port(pcie, bus, devfn);
  853. if (!port)
  854. return PCIBIOS_DEVICE_NOT_FOUND;
  855. return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
  856. }
  857. /* PCI configuration space read function */
  858. static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
  859. int size, u32 *val)
  860. {
  861. struct mvebu_pcie *pcie = bus->sysdata;
  862. struct mvebu_pcie_port *port;
  863. port = mvebu_pcie_find_port(pcie, bus, devfn);
  864. if (!port)
  865. return PCIBIOS_DEVICE_NOT_FOUND;
  866. return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
  867. }
/* Config accessors for the virtual root bus (emulated bridges). */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
  872. static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
  873. {
  874. struct mvebu_pcie_port *port = d->domain->host_data;
  875. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  876. unsigned long flags;
  877. u32 unmask;
  878. raw_spin_lock_irqsave(&port->irq_lock, flags);
  879. unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
  880. unmask &= ~PCIE_INT_INTX(hwirq);
  881. mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
  882. raw_spin_unlock_irqrestore(&port->irq_lock, flags);
  883. }
  884. static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
  885. {
  886. struct mvebu_pcie_port *port = d->domain->host_data;
  887. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  888. unsigned long flags;
  889. u32 unmask;
  890. raw_spin_lock_irqsave(&port->irq_lock, flags);
  891. unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
  892. unmask |= PCIE_INT_INTX(hwirq);
  893. mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
  894. raw_spin_unlock_irqrestore(&port->irq_lock, flags);
  895. }
/* Chip driving the four per-port legacy INTx interrupts. */
static struct irq_chip intx_irq_chip = {
	.name = "mvebu-INTx",
	.irq_mask = mvebu_pcie_intx_irq_mask,
	.irq_unmask = mvebu_pcie_intx_irq_unmask,
};
  901. static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
  902. unsigned int virq, irq_hw_number_t hwirq)
  903. {
  904. struct mvebu_pcie_port *port = h->host_data;
  905. irq_set_status_flags(virq, IRQ_LEVEL);
  906. irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
  907. irq_set_chip_data(virq, port);
  908. return 0;
  909. }
/* One-cell DT interrupt specifier (INTA..INTD index). */
static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
	.map = mvebu_pcie_intx_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
  914. static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
  915. {
  916. struct device *dev = &port->pcie->pdev->dev;
  917. struct device_node *pcie_intc_node;
  918. raw_spin_lock_init(&port->irq_lock);
  919. pcie_intc_node = of_get_next_child(port->dn, NULL);
  920. if (!pcie_intc_node) {
  921. dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
  922. return -ENODEV;
  923. }
  924. port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
  925. &mvebu_pcie_intx_irq_domain_ops,
  926. port);
  927. of_node_put(pcie_intc_node);
  928. if (!port->intx_irq_domain) {
  929. dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
  930. return -ENOMEM;
  931. }
  932. return 0;
  933. }
/*
 * Chained handler for the per-port "intx" interrupt: demultiplex pending,
 * unmasked INTA..INTD bits from the cause register into the INTx domain.
 */
static void mvebu_pcie_irq_handler(struct irq_desc *desc)
{
	struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = &port->pcie->pdev->dev;
	u32 cause, unmask, status;
	int i;

	chained_irq_enter(chip, desc);

	cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	/* Only service sources that are both pending and unmasked. */
	status = cause & unmask;

	/* Process legacy INTx interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(status & PCIE_INT_INTX(i)))
			continue;

		/* -EINVAL means no virq is mapped for this hwirq. */
		if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
			dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
	}

	chained_irq_exit(chip, desc);
}
  954. static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
  955. {
  956. /* Interrupt support on mvebu emulated bridges is not implemented yet */
  957. if (dev->bus->number == 0)
  958. return 0; /* Proper return code 0 == NO_IRQ */
  959. return of_irq_parse_and_map_pci(dev, slot, pin);
  960. }
  961. static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
  962. const struct resource *res,
  963. resource_size_t start,
  964. resource_size_t size,
  965. resource_size_t align)
  966. {
  967. if (dev->bus->number != 0)
  968. return start;
  969. /*
  970. * On the PCI-to-PCI bridge side, the I/O windows must have at
  971. * least a 64 KB size and the memory windows must have at
  972. * least a 1 MB size. Moreover, MBus windows need to have a
  973. * base address aligned on their size, and their size must be
  974. * a power of two. This means that if the BAR doesn't have a
  975. * power of two size, several MBus windows will actually be
  976. * created. We need to ensure that the biggest MBus window
  977. * (which will be the first one) is aligned on its size, which
  978. * explains the rounddown_pow_of_two() being done here.
  979. */
  980. if (res->flags & IORESOURCE_IO)
  981. return round_up(start, max_t(resource_size_t, SZ_64K,
  982. rounddown_pow_of_two(size)));
  983. else if (res->flags & IORESOURCE_MEM)
  984. return round_up(start, max_t(resource_size_t, SZ_1M,
  985. rounddown_pow_of_two(size)));
  986. else
  987. return start;
  988. }
  989. static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
  990. struct device_node *np,
  991. struct mvebu_pcie_port *port)
  992. {
  993. int ret = 0;
  994. ret = of_address_to_resource(np, 0, &port->regs);
  995. if (ret)
  996. return (void __iomem *)ERR_PTR(ret);
  997. return devm_ioremap_resource(&pdev->dev, &port->regs);
  998. }
/* The "ranges" flags cell encodes the address-space type in bits [25:24]. */
#define DT_FLAGS_TO_TYPE(flags)       (((flags) >> 24) & 0x03)
#define    DT_TYPE_IO                 0x1 /* I/O space */
#define    DT_TYPE_MEM32              0x2 /* 32-bit memory space */
/* MBus target ID and attribute live in the top two bytes of the CPU address. */
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr)   (((cpuaddr) >> 48) & 0xFF)
/*
 * Walk the controller's "ranges" property looking for an entry whose PCI
 * slot matches @devfn and whose space type (I/O vs MEM32) matches @type,
 * then extract the MBus target/attribute encoded in the CPU address.
 * Returns 0 on success, -EINVAL if "ranges" is absent, -ENOENT if no
 * matching entry exists.
 */
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	/* PCI child address is 3 cells, size is 2 cells (per the binding). */
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	/* Parent address cells come from the node itself. */
	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}
  1039. static int mvebu_pcie_suspend(struct device *dev)
  1040. {
  1041. struct mvebu_pcie *pcie;
  1042. int i;
  1043. pcie = dev_get_drvdata(dev);
  1044. for (i = 0; i < pcie->nports; i++) {
  1045. struct mvebu_pcie_port *port = pcie->ports + i;
  1046. if (!port->base)
  1047. continue;
  1048. port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
  1049. }
  1050. return 0;
  1051. }
  1052. static int mvebu_pcie_resume(struct device *dev)
  1053. {
  1054. struct mvebu_pcie *pcie;
  1055. int i;
  1056. pcie = dev_get_drvdata(dev);
  1057. for (i = 0; i < pcie->nports; i++) {
  1058. struct mvebu_pcie_port *port = pcie->ports + i;
  1059. if (!port->base)
  1060. continue;
  1061. mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
  1062. mvebu_pcie_setup_hw(port);
  1063. }
  1064. return 0;
  1065. }
/* devm action: drop the clock reference taken in mvebu_pcie_parse_port(). */
static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}
/*
 * Parse one DT child node describing a PCIe port.
 *
 * Returns 1 when the port was fully parsed and should be used, 0 when the
 * port must be skipped (non-fatal problem; allocated names are released),
 * or a negative errno on a fatal error (probe must abort).
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
				 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	u32 slot_power_limit;
	int ret;
	u32 num_lanes;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* Lane defaults to 0 when the property is absent. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
		port->is_x4 = true;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;
	if (PCI_FUNC(port->devfn) != 0) {
		dev_err(dev, "%s: invalid function number, must be zero\n",
			port->name);
		goto skip;
	}

	/* A memory window target/attribute is mandatory for a usable port. */
	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/* The I/O window is optional; -1 marks "no I/O support". */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	/*
	 * Old DT bindings do not contain "intx" interrupt
	 * so do not fail probing driver when interrupt does not exist.
	 */
	port->intx_irq = of_irq_get_byname(child, "intx");
	if (port->intx_irq == -EPROBE_DEFER) {
		ret = port->intx_irq;
		goto err;
	}
	if (port->intx_irq <= 0) {
		dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
			      "%pOF does not contain intx interrupt\n",
			 port->name, child);
	}

	port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
					  port->name);
	if (!port->reset_name) {
		ret = -ENOMEM;
		goto err;
	}

	port->reset_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(child),
						 "reset", GPIOD_OUT_HIGH,
						 port->name);
	ret = PTR_ERR_OR_ZERO(port->reset_gpio);
	if (ret) {
		if (ret != -ENOENT)
			goto err;
		/* reset gpio is optional */
		port->reset_gpio = NULL;
		devm_kfree(dev, port->reset_name);
		port->reset_name = NULL;
	}

	slot_power_limit = of_pci_get_slot_power_limit(child,
						       &port->slot_power_limit_value,
						       &port->slot_power_limit_scale);
	if (slot_power_limit)
		dev_info(dev, "%s: Slot power limit %u.%uW\n",
			 port->name,
			 slot_power_limit / 1000,
			 (slot_power_limit / 100) % 10);

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	/* Release the clock reference automatically on device teardown. */
	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
/*
 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
 * of the PCI Express Card Electromechanical Specification, 1.1.
 */
static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
{
	int ret;

	ret = clk_prepare_enable(port->clk);
	if (ret < 0)
		return ret;

	if (port->reset_gpio) {
		/* Default post-deassert delay; DT may override it. */
		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;

		of_property_read_u32(port->dn, "reset-delay-us",
				     &reset_udelay);

		/* 100µs of stable refclk before deasserting PERST#. */
		udelay(100);

		gpiod_set_value_cansleep(port->reset_gpio, 0);
		msleep(reset_udelay / 1000);
	}

	return 0;
}
/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	/* gpiod_set_value_cansleep() tolerates a NULL (optional) gpio. */
	gpiod_set_value_cansleep(port->reset_gpio, 1);
	clk_disable_unprepare(port->clk);
}
/*
 * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
 * so we need extra resource setup parsing our special DT properties encoding
 * the MEM and IO apertures.
 */
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret;

	/* Get the PCIe memory aperture */
	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
	if (resource_size(&pcie->mem) == 0) {
		dev_err(dev, "invalid memory aperture size\n");
		return -EINVAL;
	}

	pcie->mem.name = "PCI MEM";
	pci_add_resource(&bridge->windows, &pcie->mem);
	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
	if (ret)
		return ret;

	/* Get the PCIe IO aperture; it is optional on some platforms. */
	mvebu_mbus_get_pcie_io_aperture(&pcie->io);

	if (resource_size(&pcie->io) != 0) {
		/* Expose the aperture via the kernel's virtual I/O space. */
		pcie->realio.flags = pcie->io.flags;
		pcie->realio.start = PCIBIOS_MIN_IO;
		pcie->realio.end = min_t(resource_size_t,
					 IO_SPACE_LIMIT - SZ_64K,
					 resource_size(&pcie->io) - 1);
		pcie->realio.name = "PCI I/O";

		ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
		if (ret)
			return ret;

		pci_add_resource(&bridge->windows, &pcie->realio);
		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Probe: parse apertures and all DT port nodes, then power up and set up
 * each port individually (failures disable just that port), and finally
 * register the host bridge with the virtual bus 0 of emulated bridges.
 */
static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	/* First pass: parse every enabled port node; ports that fail
	 * non-fatally (parse returns 0) are simply not counted. */
	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			continue;
		}
		port->dn = child;
		i++;
	}
	pcie->nports = i;

	/* Second pass: power up and initialize each parsed port; failures
	 * leave port->base NULL so the port is ignored from then on. */
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		ret = mvebu_pci_bridge_emul_init(port);
		if (ret < 0) {
			dev_err(dev, "%s: cannot init emulated bridge\n",
				port->name);
			devm_iounmap(dev, port->base);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		if (irq > 0) {
			ret = mvebu_pcie_init_irq_domain(port);
			if (ret) {
				dev_err(dev, "%s: cannot init irq domain\n",
					port->name);
				pci_bridge_emul_cleanup(&port->bridge);
				devm_iounmap(dev, port->base);
				port->base = NULL;
				mvebu_pcie_powerdown(port);
				continue;
			}
			irq_set_chained_handler_and_data(irq,
							 mvebu_pcie_irq_handler,
							 port);
		}

		/*
		 * PCIe topology exported by mvebu hw is quite complicated. In
		 * reality has something like N fully independent host bridges
		 * where each host bridge has one PCIe Root Port (which acts as
		 * PCI Bridge device). Each host bridge has its own independent
		 * internal registers, independent access to PCI config space,
		 * independent interrupt lines, independent window and memory
		 * access configuration. But additionally there is some kind of
		 * peer-to-peer support between PCIe devices behind different
		 * host bridges limited just to forwarding of memory and I/O
		 * transactions (forwarding of error messages and config cycles
		 * is not supported). So we could say there are N independent
		 * PCIe Root Complexes.
		 *
		 * For this kind of setup DT should have been structured into
		 * N independent PCIe controllers / host bridges. But instead
		 * structure in past was defined to put PCIe Root Ports of all
		 * host bridges into one bus zero, like in classic multi-port
		 * Root Complex setup with just one host bridge.
		 *
		 * This means that pci-mvebu.c driver provides "virtual" bus 0
		 * on which registers all PCIe Root Ports (PCI Bridge devices)
		 * specified in DT by their BDF addresses and virtually routes
		 * PCI config access of each PCI bridge device to specific PCIe
		 * host bridge.
		 *
		 * Normally PCI Bridge should choose between Type 0 and Type 1
		 * config requests based on primary and secondary bus numbers
		 * configured on the bridge itself. But because mvebu PCI Bridge
		 * does not have registers for primary and secondary bus numbers
		 * in its config space, it determines the type of config requests
		 * via its own custom way.
		 *
		 * There are two options how mvebu determines the type of config
		 * request.
		 *
		 * 1. If Secondary Bus Number Enable bit is not set or is not
		 * available (applies for pre-XP PCIe controllers) then Type 0
		 * is used if target bus number equals Local Bus Number (bits
		 * [15:8] in register 0x1a04) and target device number differs
		 * from Local Device Number (bits [20:16] in register 0x1a04).
		 * Type 1 is used if target bus number differs from Local Bus
		 * Number. And when target bus number equals Local Bus Number
		 * and target device equals Local Device Number then request is
		 * routed to Local PCI Bridge (PCIe Root Port).
		 *
		 * 2. If Secondary Bus Number Enable bit is set (bit 7 in
		 * register 0x1a2c) then mvebu hw determines the type of config
		 * request like compliant PCI Bridge based on primary bus number
		 * which is configured via Local Bus Number (bits [15:8] in
		 * register 0x1a04) and secondary bus number which is configured
		 * via Secondary Bus Number (bits [7:0] in register 0x1a2c).
		 * Local PCI Bridge (PCIe Root Port) is available on primary bus
		 * as device with Local Device Number (bits [20:16] in register
		 * 0x1a04).
		 *
		 * Secondary Bus Number Enable bit is disabled by default and
		 * option 2. is not available on pre-XP PCIe controllers. Hence
		 * this driver always uses option 1.
		 *
		 * Basically it means that primary and secondary buses share
		 * one virtual number configured via Local Bus Number bits and
		 * Local Device Number bits determine if accessing primary
		 * or secondary bus. Set Local Device Number to 1 and redirect
		 * all writes of PCI Bridge Secondary Bus Number register to
		 * Local Bus Number (bits [15:8] in register 0x1a04).
		 *
		 * So when accessing devices on buses behind secondary bus
		 * number it would work correctly. And also when accessing
		 * device 0 at secondary bus number via config space would be
		 * correctly routed to secondary bus. Due to issues described
		 * in mvebu_pcie_setup_hw(), PCI Bridges at primary bus (zero)
		 * are not accessed directly via PCI config space but rather
		 * indirectly via kernel emulated PCI bridge driver.
		 */
		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pcie_set_local_bus_nr(port, 0);
	}

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->child_ops = &mvebu_pcie_child_ops;
	bridge->align_resource = mvebu_pcie_align_resource;
	bridge->map_irq = mvebu_pcie_map_irq;

	return pci_host_probe(bridge);
}
/*
 * Driver removal: tear down the root bus, then quiesce each active port
 * (disable decoding, mask/clear interrupts, drop IRQ plumbing, clean up
 * the emulated bridge, remove windows) and finally power the port down.
 */
static void mvebu_pcie_remove(struct platform_device *pdev)
{
	struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 cmd, sspl;
	int i;

	/* Remove PCI bus with all devices. */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		if (!port->base)
			continue;

		/* Disable Root Bridge I/O space, memory space and bus mastering. */
		cmd = mvebu_readl(port, PCIE_CMD_OFF);
		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		mvebu_writel(port, cmd, PCIE_CMD_OFF);

		/* Mask all interrupt sources. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

		/* Clear all interrupt causes. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

		if (irq > 0)
			irq_set_chained_handler_and_data(irq, NULL, NULL);

		/* Remove IRQ domains. */
		if (port->intx_irq_domain)
			irq_domain_remove(port->intx_irq_domain);

		/* Free config space for emulated root bridge. */
		pci_bridge_emul_cleanup(&port->bridge);

		/* Disable sending Set_Slot_Power_Limit PCIe Message. */
		sspl = mvebu_readl(port, PCIE_SSPL_OFF);
		sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
		mvebu_writel(port, sspl, PCIE_SSPL_OFF);

		/* Disable and clear BARs and windows. */
		mvebu_pcie_disable_wins(port);

		/* Delete PCIe IO and MEM windows. */
		if (port->iowin.size)
			mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
		if (port->memwin.size)
			mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);

		/* Power down card and disable clocks. Must be the last step. */
		mvebu_pcie_powerdown(port);
	}
}
  1454. static const struct of_device_id mvebu_pcie_of_match_table[] = {
  1455. { .compatible = "marvell,armada-xp-pcie", },
  1456. { .compatible = "marvell,armada-370-pcie", },
  1457. { .compatible = "marvell,dove-pcie", },
  1458. { .compatible = "marvell,kirkwood-pcie", },
  1459. {},
  1460. };
/* Save/restore PCIE_STAT around sleep; runs in the noirq phase. */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
/* Platform driver glue and module metadata. */
static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
	/* .remove_new: void-returning remove callback (newer platform API). */
	.remove_new = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);

MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Marvell EBU PCIe controller");
MODULE_LICENSE("GPL v2");