  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PCIe host controller driver for Freescale i.MX6 SoCs
  4. *
  5. * Copyright (C) 2013 Kosagi
  6. * http://www.kosagi.com
  7. *
  8. * Author: Sean Cross <xobs@kosagi.com>
  9. */
  10. #include <linux/clk.h>
  11. #include <linux/delay.h>
  12. #include <linux/gpio.h>
  13. #include <linux/kernel.h>
  14. #include <linux/mfd/syscon.h>
  15. #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  16. #include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
  17. #include <linux/module.h>
  18. #include <linux/of_gpio.h>
  19. #include <linux/of_device.h>
  20. #include <linux/pci.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/regmap.h>
  23. #include <linux/regulator/consumer.h>
  24. #include <linux/resource.h>
  25. #include <linux/signal.h>
  26. #include <linux/types.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/reset.h>
  29. #include "pcie-designware.h"
/* Recover the driver state stored by platform_set_drvdata() in probe. */
#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)
/* SoC families supported by this driver; chosen via the of_match data. */
enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};
  37. struct imx6_pcie {
  38. struct dw_pcie *pci;
  39. int reset_gpio;
  40. bool gpio_active_high;
  41. struct clk *pcie_bus;
  42. struct clk *pcie_phy;
  43. struct clk *pcie_inbound_axi;
  44. struct clk *pcie;
  45. struct regmap *iomuxc_gpr;
  46. struct reset_control *pciephy_reset;
  47. struct reset_control *apps_reset;
  48. enum imx6_pcie_variants variant;
  49. u32 tx_deemph_gen1;
  50. u32 tx_deemph_gen2_3p5db;
  51. u32 tx_deemph_gen2_6db;
  52. u32 tx_swing_full;
  53. u32 tx_swing_low;
  54. int link_gen;
  55. struct regulator *vpcie;
  56. };
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP		0x50	/* MSI capability in RC config space */
#define PCIE_RC_LCR			0x7c	/* link capabilities (max speed field) */
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf
#define PCIE_RC_LCSR			0x80	/* link control/status */

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET			0x700
#define PCIE_PL_PFLR			(PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK	(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK		(1 << 15)
#define PCIE_PHY_DEBUG_R0		(PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1		(PL_OFFSET + 0x2c)

/* PHY control/status handshake registers and their bit positions */
#define PCIE_PHY_CTRL			(PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC		0
#define PCIE_PHY_CTRL_CAP_ADR_LOC	16
#define PCIE_PHY_CTRL_CAP_DAT_LOC	17
#define PCIE_PHY_CTRL_WR_LOC		18
#define PCIE_PHY_CTRL_RD_LOC		19

#define PCIE_PHY_STAT			(PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC		16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped; reached via the handshake above) */
#define PCIE_PHY_RX_ASIC_OUT		0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO		0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN	(1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN	(1 << 3)
  91. static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
  92. {
  93. struct dw_pcie *pci = imx6_pcie->pci;
  94. u32 val;
  95. u32 max_iterations = 10;
  96. u32 wait_counter = 0;
  97. do {
  98. val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
  99. val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
  100. wait_counter++;
  101. if (val == exp_val)
  102. return 0;
  103. udelay(1);
  104. } while (wait_counter < max_iterations);
  105. return -ETIMEDOUT;
  106. }
  107. static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
  108. {
  109. struct dw_pcie *pci = imx6_pcie->pci;
  110. u32 val;
  111. int ret;
  112. val = addr << PCIE_PHY_CTRL_DATA_LOC;
  113. dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
  114. val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
  115. dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
  116. ret = pcie_phy_poll_ack(imx6_pcie, 1);
  117. if (ret)
  118. return ret;
  119. val = addr << PCIE_PHY_CTRL_DATA_LOC;
  120. dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
  121. return pcie_phy_poll_ack(imx6_pcie, 0);
  122. }
  123. /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
  124. static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
  125. {
  126. struct dw_pcie *pci = imx6_pcie->pci;
  127. u32 val, phy_ctl;
  128. int ret;
  129. ret = pcie_phy_wait_ack(imx6_pcie, addr);
  130. if (ret)
  131. return ret;
  132. /* assert Read signal */
  133. phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
  134. dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
  135. ret = pcie_phy_poll_ack(imx6_pcie, 1);
  136. if (ret)
  137. return ret;
  138. val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
  139. *data = val & 0xffff;
  140. /* deassert Read signal */
  141. dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
  142. return pcie_phy_poll_ack(imx6_pcie, 0);
  143. }
/*
 * Write @data to a 16-bit PCIe PHY control register (not memory-mapped).
 * Each step is a strict request/ack handshake over PCIE_PHY_CTRL:
 * latch address, capture data, pulse the write strobe, release.
 * Returns 0 on success or -ETIMEDOUT from any handshake step.
 */
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* present the data on the control register's data field */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* release the control register */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}
  186. static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
  187. {
  188. u32 tmp;
  189. pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
  190. tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
  191. PHY_RX_OVRD_IN_LO_RX_PLL_EN);
  192. pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
  193. usleep_range(2000, 3000);
  194. pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
  195. tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
  196. PHY_RX_OVRD_IN_LO_RX_PLL_EN);
  197. pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
  198. }
/* Added for PCI abort handling */
/*
 * ARM external-abort fixup installed via hook_fault_code() in
 * imx6_pcie_init().  Aborted load instructions are patched to return
 * all-ones (the PCI "no device" pattern) and skipped, instead of
 * crashing the kernel.  Returns 0 when the fault was handled, 1 to
 * let the kernel's default abort handling run.
 *
 * NOTE(review): the opcode masks assume 4-byte ARM (not Thumb)
 * encodings of single-register loads and the 0x90-family transfers —
 * confirm against the ARM Architecture Reference Manual.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;	/* faulting opcode */
	int reg = (instr >> 12) & 15;			/* destination register */

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)		/* byte-sized load */
			val = 255;
		else				/* word-sized load */
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;		/* skip faulting instruction */
		return 0;
	}

	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;	/* not a load we recognize; not handled */
}
/*
 * Put the PCIe core (and PHY, where applicable) into reset and cut
 * the optional vpcie supply.  The mechanism is variant-specific:
 * dedicated reset controls on i.MX7D, IOMUXC GPR bits elsewhere.
 */
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/* set test power-down, gate the reference clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	/* Only disable the optional supply if it is currently enabled. */
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}
/*
 * Enable the variant-specific PCIe reference clock.  i.MX6SX also
 * needs the inbound AXI clock; i.MX6Q/QP power up the core PHY via
 * IOMUXC GPR1; i.MX7D needs nothing extra here.
 * Returns 0 or a clk_prepare_enable() error.
 */
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * the async reset input need ref clock to sync internally,
		 * when the ref clock comes after reset, internal synced
		 * reset time is too short, cannot meet the requirement.
		 * add one ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	}

	return ret;
}
  298. static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
  299. {
  300. u32 val;
  301. unsigned int retries;
  302. struct device *dev = imx6_pcie->pci->dev;
  303. for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
  304. regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
  305. if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
  306. return;
  307. usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
  308. PHY_PLL_LOCK_WAIT_USLEEP_MAX);
  309. }
  310. dev_err(dev, "PCIe PLL lock timeout\n");
  311. }
/*
 * Power the controller back up: enable the optional vpcie supply, the
 * PHY/bus/core clocks and the reference clock, pulse the board's
 * PERST# GPIO (if present), then release the variant-specific resets.
 * On failure, everything already enabled is unwound in reverse order
 * via the goto chain below.
 */
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* drive PERST# active for 100ms, then release it */
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}
/*
 * Program the IOMUXC GPR bits that configure the PCIe PHY: reference
 * clock selection, LOS level, TX de-emphasis/swing tuning, and
 * finally the root-complex device type.  The i.MX6SX case
 * intentionally falls through into the common GPR8/GPR12 setup.
 */
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		/* PHY refclk select = 0 — presumably the internal source;
		 * verify against the i.MX7D GPR12 documentation. */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		/* keep CTL_2 clear — the LTSSM is started later in
		 * imx6_pcie_establish_link() by setting this bit */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		/* TX tuning values come from the device tree (see probe) */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}
  425. static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
  426. {
  427. struct dw_pcie *pci = imx6_pcie->pci;
  428. struct device *dev = pci->dev;
  429. /* check if the link is up or not */
  430. if (!dw_pcie_wait_for_link(pci))
  431. return 0;
  432. dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
  433. dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
  434. dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
  435. return -ETIMEDOUT;
  436. }
  437. static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
  438. {
  439. struct dw_pcie *pci = imx6_pcie->pci;
  440. struct device *dev = pci->dev;
  441. u32 tmp;
  442. unsigned int retries;
  443. for (retries = 0; retries < 200; retries++) {
  444. tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
  445. /* Test if the speed change finished. */
  446. if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
  447. return 0;
  448. usleep_range(100, 1000);
  449. }
  450. dev_err(dev, "Speed change timeout\n");
  451. return -EINVAL;
  452. }
/*
 * Train the link.  Training always starts at Gen1; when
 * fsl,max-link-speed allows Gen2, the limit is raised after link-up
 * and a directed speed change is performed.  On any failure the PHY
 * is reset so the next attempt starts clean.  Returns 0 on link-up,
 * negative errno otherwise.
 */
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	if (imx6_pcie->variant == IMX7D)
		reset_control_deassert(imx6_pcie->apps_reset);
	else
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */
			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	/* Report the speed the link actually trained to. */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}
/*
 * DesignWare host_init hook: full bring-up sequence.  Order matters:
 * core reset -> PHY GPR setup -> clocks/reset release -> RC setup ->
 * link training -> MSI.
 */
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);
	/* link-up failure is logged but does not fail host init */
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}
/* DesignWare host callbacks; only host_init is needed here. */
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};
  540. static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
  541. struct platform_device *pdev)
  542. {
  543. struct dw_pcie *pci = imx6_pcie->pci;
  544. struct pcie_port *pp = &pci->pp;
  545. struct device *dev = &pdev->dev;
  546. int ret;
  547. if (IS_ENABLED(CONFIG_PCI_MSI)) {
  548. pp->msi_irq = platform_get_irq_byname(pdev, "msi");
  549. if (pp->msi_irq <= 0) {
  550. dev_err(dev, "failed to get MSI irq\n");
  551. return -ENODEV;
  552. }
  553. }
  554. pp->ops = &imx6_pcie_host_ops;
  555. ret = dw_pcie_host_init(pp);
  556. if (ret) {
  557. dev_err(dev, "failed to initialize host\n");
  558. return ret;
  559. }
  560. return 0;
  561. }
static const struct dw_pcie_ops dw_pcie_ops = {
	/* No special ops needed, but pcie-designware still expects this struct */
};
/*
 * Probe: allocate driver state, map the DBI register window, acquire
 * the reset GPIO, clocks, resets (variant-dependent), the IOMUXC GPR
 * syscon and the DT tuning/limit properties, then register the host.
 * Finally, set the enable bit in the RC's own MSI capability when the
 * kernel has MSI enabled.
 */
static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;
	u16 val;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	/* the SoC variant is encoded in the of_match_table .data pointer */
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* request PERST# driven to its active (asserted) level */
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	/* Variant-specific extra clocks / reset controls */
	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings (all optional; defaults below) */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed; default to Gen1 if the property is absent */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	/* vpcie supply is optional; -ENODEV means "not provided" */
	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
			return PTR_ERR(imx6_pcie->vpcie);
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	if (pci_msi_enabled()) {
		/* set the MSI enable bit in the RC's MSI capability */
		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
					PCI_MSI_FLAGS);
		val |= PCI_MSI_FLAGS_ENABLE;
		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
				   val);
	}

	return 0;
}
/* Platform shutdown hook. */
static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}
/* .data carries the enum imx6_pcie_variants value for the matched SoC. */
static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
	{},
};
static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		/* no remove(): disallow unbind via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};
static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel.  Installing the handler this early is safe because
	 * imx6q_pcie_abort_handler() only fixes up the faulting
	 * context's registers and never touches driver state, so it
	 * cannot observe anything uninitialized.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);