// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2020 MediaTek Inc.
 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

#define PCIE_EQ_PRESET_01_REG		0x100
#define PCIE_VAL_LN0_DOWNSTREAM		GENMASK(6, 0)
#define PCIE_VAL_LN0_UPSTREAM		GENMASK(14, 8)
#define PCIE_VAL_LN1_DOWNSTREAM		GENMASK(22, 16)
#define PCIE_VAL_LN1_UPSTREAM		GENMASK(30, 24)

#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		((val & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_PIPE4_PIE8_REG		0x338
#define PCIE_K_FINETUNE_MAX		GENMASK(5, 0)
#define PCIE_K_FINETUNE_ERR		GENMASK(7, 6)
#define PCIE_K_PRESET_TO_USE		GENMASK(18, 8)
#define PCIE_K_PHYPARAM_QUERY		BIT(19)
#define PCIE_K_QUERY_TIMEOUT		BIT(20)
#define PCIE_K_PRESET_TO_USE_16G	GENMASK(31, 21)

#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

#define PCIE_MISC_CTRL_REG		0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)

#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)

#define MAX_NUM_PHY_RESETS		3

/* Time in ms needed to complete PCIe reset on EN7581 SoC */
#define PCIE_EN7581_RESET_TIME_MS	100

struct mtk_gen3_pcie;

/**
 * struct mtk_gen3_pcie_pdata - differentiate between host generations
 * @power_up: PCIe power-up callback
 * @phy_resets: SoC-specific PHY reset line data
 */
struct mtk_gen3_pcie_pdata {
	int (*power_up)(struct mtk_gen3_pcie *pcie);
	struct {
		const char *id[MAX_NUM_PHY_RESETS];
		int num_resets;
	} phy_resets;
};

/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base
 * @msg_addr: MSI message address
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};

/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_resets: PHY reset controllers
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_gen3_pcie {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;
	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
	const struct mtk_gen3_pcie_pdata *soc;
};

/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
static const char *const ltssm_str[] = {
	"detect.quiet",			/* 0x00 */
	"detect.active",		/* 0x01 */
	"polling.active",		/* 0x02 */
	"polling.compliance",		/* 0x03 */
	"polling.configuration",	/* 0x04 */
	"config.linkwidthstart",	/* 0x05 */
	"config.linkwidthaccept",	/* 0x06 */
	"config.lanenumwait",		/* 0x07 */
	"config.lanenumaccept",		/* 0x08 */
	"config.complete",		/* 0x09 */
	"config.idle",			/* 0x0A */
	"recovery.receiverlock",	/* 0x0B */
	"recovery.equalization",	/* 0x0C */
	"recovery.speed",		/* 0x0D */
	"recovery.receiverconfig",	/* 0x0E */
	"recovery.idle",		/* 0x0F */
	"L0",				/* 0x10 */
	"L0s",				/* 0x11 */
	"L1.entry",			/* 0x12 */
	"L1.idle",			/* 0x13 */
	"L2.idle",			/* 0x14 */
	"L2.transmitwake",		/* 0x15 */
	"disable",			/* 0x16 */
	"loopback.entry",		/* 0x17 */
	"loopback.active",		/* 0x18 */
	"loopback.exit",		/* 0x19 */
	"hotreset",			/* 0x1A */
};

/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
				       int where, int size)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;
	int bytes;
	u32 val;

	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
	      PCIE_CFG_HEADER(bus->number, devfn);

	writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}
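
/*
 * All config accesses go through a fixed 4KiB window starting at
 * PCIE_CFG_OFFSET_ADDR; the target bus/devfn and the byte enables are
 * programmed into PCIE_CFGNUM_REG first (the MAC presumably turns the
 * window access into a matching configuration TLP).
 */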
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;

	return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}
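
/*
 * Program the TLP header for the target device, then use the generic
 * 32-bit accessor, which extracts the requested bytes from the 32-bit
 * word read through the config window.
 */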
static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}
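
/*
 * The window only takes 32-bit writes, so sub-word writes are emulated:
 * shift the value into its byte-lane position and rely on the byte
 * enables set in the TLP header to restrict the update to the intended
 * bytes.
 */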
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
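
/*
 * Program the outbound address translation (ATR) windows for a CPU->PCI
 * range. Each of the PCIE_MAX_TRANS_TABLES hardware windows can only
 * translate a power-of-2-sized region aligned to its own size, so the
 * range is split into such chunks; *num tracks the next free window.
 */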
static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int *num)
{
	resource_size_t remaining = size;
	resource_size_t table_size;
	resource_size_t addr_align;
	const char *range_type;
	void __iomem *table;
	u32 val;

	while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
		/* Table size needs to be a power of 2 */
		table_size = BIT(fls(remaining) - 1);

		if (cpu_addr > 0) {
			addr_align = BIT(ffs(cpu_addr) - 1);
			table_size = min(table_size, addr_align);
		}

		/* Minimum size of translate table is 4KiB */
		if (table_size < 0x1000) {
			dev_err(pcie->dev, "illegal table size %#llx\n",
				(unsigned long long)table_size);
			return -EINVAL;
		}

		table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
			*num * PCIE_ATR_TLB_SET_OFFSET;

		writel_relaxed(lower_32_bits(cpu_addr) |
			       PCIE_ATR_SIZE(fls(table_size) - 1), table);
		writel_relaxed(upper_32_bits(cpu_addr),
			       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
		writel_relaxed(lower_32_bits(pci_addr),
			       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
		writel_relaxed(upper_32_bits(pci_addr),
			       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

		if (type == IORESOURCE_IO) {
			val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
			range_type = "IO";
		} else {
			val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
			range_type = "MEM";
		}

		writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, *num, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr,
			(unsigned long long)table_size);

		cpu_addr += table_size;
		pci_addr += table_size;
		remaining -= table_size;
		(*num)++;
	}

	if (remaining)
		dev_warn(pcie->dev, "not enough translation windows for addr %#llx, limited to [%d]\n",
			 (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);

	return 0;
}
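
/*
 * Program each MSI set's capture address so that an inbound MSI write
 * to that address is handled by the matching set, then enable all sets
 * and the top-level MSI interrupt.
 */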
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}
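
/*
 * Bring up the port: switch the MAC into Root Complex mode, run the
 * PERST# sequence required by the PCIe CEM spec, poll for link-up and
 * program an outbound translation window for every bridge window.
 */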
static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

	/* Set class code */
	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

	/* Disable DVFSRC voltage request */
	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

	/* Assert all reset signals */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		const char *ltssm_state;
		int ltssm_index;

		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
		ltssm_index = PCIE_LTSSM_STATE(val);
		ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
			      "Unknown state" : ltssm_str[ltssm_index];
		dev_err(pcie->dev,
			"PCIe link down, current LTSSM state: %s (%#x)\n",
			ltssm_state, val);
		return err;
	}

	mtk_pcie_enable_msi(pcie);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;

		if (type == IORESOURCE_IO)
			cpu_addr = pci_pio_to_address(res->start);
		else if (type == IORESOURCE_MEM)
			cpu_addr = res->start;
		else
			continue;

		pci_addr = res->start - entry->offset;
		size = resource_size(res);

		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
					       type, &table_index);
		if (err)
			return err;
	}

	return 0;
}

static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}

static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}

static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
		 MSI_FLAG_MULTI_PCI_MSI,
	.chip = &mtk_msi_irq_chip,
};

static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}

static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}

static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack		= mtk_msi_bottom_irq_ack,
	.irq_mask		= mtk_msi_bottom_irq_mask,
	.irq_unmask		= mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.name			= "MSI",
};
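
/*
 * Multi-MSI needs a naturally aligned, power-of-2 block of vectors,
 * hence the order-based bitmap allocation. Since such a block cannot
 * straddle a PCIE_MSI_IRQS_PER_SET boundary, all vectors of one
 * allocation share the same MSI set.
 */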
static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &pcie->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}

static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};

static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status will remain asserted
 * until the corresponding de-assert message is received; hence the
 * status can only be cleared after the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}

static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask = mtk_intx_mask,
	.irq_unmask = mtk_intx_unmask,
	.irq_eoi = mtk_intx_eoi,
	.name = "INTx",
};

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&pcie->irq_lock);

	/* Setup INTx */
	intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n");
		return -ENODEV;
	}

	pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
		ret = -ENODEV;
		goto out_put_node;
	}

	/* Setup MSI */
	mutex_init(&pcie->lock);

	pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
							&mtk_msi_bottom_domain_ops,
							pcie);
	if (!pcie->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n");
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
						     &mtk_msi_domain_info,
						     pcie->msi_bottom_domain);
	if (!pcie->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		ret = -ENODEV;
		goto err_msi_domain;
	}

	of_node_put(intc_node);
	return 0;

err_msi_domain:
	irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(pcie->intx_domain);
out_put_node:
	of_node_put(intc_node);
	return ret;
}

static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

	if (pcie->intx_domain)
		irq_domain_remove(pcie->intx_domain);

	if (pcie->msi_domain)
		irq_domain_remove(pcie->msi_domain);

	if (pcie->msi_bottom_domain)
		irq_domain_remove(pcie->msi_bottom_domain);

	irq_dispose_mapping(pcie->irq);
}
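
/*
 * Drain one MSI set: re-read its status register until no enabled
 * vector is still pending, handing each set bit to the bottom domain
 * (the per-vector status bit is cleared by mtk_msi_bottom_irq_ack()).
 */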
static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
	struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			generic_handle_domain_irq(pcie->msi_bottom_domain,
						  hwirq);
		}
	} while (true);
}
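
/*
 * Chained handler for the controller interrupt: demultiplex the INTx
 * bits (24-27) and the per-set MSI summary bits (8-15) of
 * PCIE_INT_STATUS_REG. Each MSI summary bit is cleared only after its
 * set has been drained.
 */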
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT)
		generic_handle_domain_irq(pcie->intx_domain,
					  irq_bit - PCIE_INTX_SHIFT);

	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domains(pcie);
	if (err)
		return err;

	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq < 0)
		return pcie->irq;

	irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);

	return 0;
}
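
/*
 * Gather the DT-provided resources: the "pcie-mac" register block, the
 * optional PHY and MAC resets, the optional PHY and the clock bulk.
 */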
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
	int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	if (!regs)
		return -EINVAL;
	pcie->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(pcie->base)) {
		dev_err(dev, "failed to map register base\n");
		return PTR_ERR(pcie->base);
	}

	pcie->reg_base = regs->start;

	for (i = 0; i < num_resets; i++)
		pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];

	ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
							  pcie->phy_resets);
	if (ret) {
		dev_err(dev, "failed to get PHY bulk reset\n");
		return ret;
	}

	pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(pcie->mac_reset)) {
		ret = PTR_ERR(pcie->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n");

		return ret;
	}

	pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n");

		return ret;
	}

	pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
	if (pcie->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n");
		return pcie->num_clks;
	}

	return 0;
}
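
/*
 * EN7581-specific power-up. Unlike mtk_pcie_power_up(), the PHY is
 * initialized before its resets are deasserted, extra settling time is
 * required around the reset sequence, and the lane equalization presets
 * are programmed between clock prepare and clock enable.
 */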
static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;
	u32 val;

	/*
	 * Wait for the time needed to complete the bulk assert in
	 * mtk_pcie_setup for EN7581 SoC.
	 */
	mdelay(PCIE_EN7581_RESET_TIME_MS);

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		return err;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
					  pcie->phy_resets);
	if (err) {
		dev_err(dev, "failed to deassert PHYs\n");
		goto err_phy_deassert;
	}

	/*
	 * Wait for the time needed to complete the bulk de-assert above.
	 * This time is specific for EN7581 SoC.
	 */
	mdelay(PCIE_EN7581_RESET_TIME_MS);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to prepare clock\n");
		goto err_clk_prepare;
	}

	val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
	      FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
	      FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
	      FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
	writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);

	val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
	      FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
	      FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
	      FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
	writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);

	err = clk_bulk_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_enable;
	}

	return 0;

err_clk_enable:
	clk_bulk_unprepare(pcie->num_clks, pcie->clks);
err_clk_prepare:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);
err_phy_deassert:
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);

	return err;
}
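
/*
 * Default power-up sequence: release the PHY resets and bring up the
 * PHY, then release the MAC reset and enable the bus clocks.
 */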
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* PHY power on and enable pipe clock */
	err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
					  pcie->phy_resets);
	if (err) {
		dev_err(dev, "failed to deassert PHYs\n");
		return err;
	}

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(pcie->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(pcie->mac_reset);
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);
err_phy_init:
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);

	return err;
}

static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	reset_control_assert(pcie->mac_reset);
	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);
}
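
/*
 * One-time port setup: parse resources, force a clean reset state (the
 * bootloader may have left the controller out of reset), power up the
 * port and attempt link training.
 */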
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
	int err;

	err = mtk_pcie_parse_port(pcie);
	if (err)
		return err;

	/*
	 * Deassert the lines first to avoid unbalancing the shared reset
	 * controller's deassert_count before the assert below.
	 */
	reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
				    pcie->phy_resets);

	/*
	 * The controller may have been left out of reset by the bootloader,
	 * so make sure that we get a clean start by asserting resets here.
	 */
	reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
				  pcie->phy_resets);
	reset_control_assert(pcie->mac_reset);
	usleep_range(10, 20);

	/* Don't touch the hardware registers before power up */
	err = pcie->soc->power_up(pcie);
	if (err)
		return err;

	/* Try link up */
	err = mtk_pcie_startup_port(pcie);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(pcie);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(pcie);

	return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_gen3_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	pcie->soc = device_get_match_data(dev);
	platform_set_drvdata(pdev, pcie);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	host->ops = &mtk_pcie_ops;
	host->sysdata = pcie;

	err = pci_host_probe(host);
	if (err) {
		mtk_pcie_irq_teardown(pcie);
		mtk_pcie_power_down(pcie);
		return err;
	}

	return 0;
}

static void mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(pcie);
	mtk_pcie_power_down(pcie);
}
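
/*
 * Save/restore the interrupt enable state across suspend: the top-level
 * PCIE_INT_ENABLE_REG plus the per-set MSI enable registers.
 */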
static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		writel_relaxed(msi_set->saved_irq_state,
			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}
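
/*
 * Request the MAC to turn off the link (presumably by sending
 * PME_Turn_Off) and poll the LTSSM until the link settles in L2 idle.
 */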
static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
	u32 val;

	val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
	val |= PCIE_TURN_OFF_LINK;
	writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);

	/* Check that the link has entered the L2 idle state */
	return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
				  (PCIE_LTSSM_STATE(val) ==
				   PCIE_LTSSM_STATE_L2_IDLE), 20,
				  50 * USEC_PER_MSEC);
}

static int mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;
	u32 val;

	/* Trigger link to L2 state */
	err = mtk_pcie_turn_off_link(pcie);
	if (err) {
		dev_err(pcie->dev, "cannot enter L2 state\n");
		return err;
	}

	/* Pull down the PERST# pin */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	dev_dbg(pcie->dev, "entered L2 state successfully\n");

	mtk_pcie_irq_save(pcie);
	mtk_pcie_power_down(pcie);

	return 0;
}

static int mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = pcie->soc->power_up(pcie);
	if (err)
		return err;

	err = mtk_pcie_startup_port(pcie);
	if (err) {
		mtk_pcie_power_down(pcie);
		return err;
	}

	mtk_pcie_irq_restore(pcie);

	return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};

static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
	.power_up = mtk_pcie_power_up,
	.phy_resets = {
		.id[0] = "phy",
		.num_resets = 1,
	},
};

static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
	.power_up = mtk_pcie_en7581_power_up,
	.phy_resets = {
		.id[0] = "phy-lane0",
		.id[1] = "phy-lane1",
		.id[2] = "phy-lane2",
		.num_resets = 3,
	},
};

static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
	{ .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove_new = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie-gen3",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");