phy-rockchip-pcie.c 10 KB

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Rockchip PCIe PHY driver
  4. *
  5. * Copyright (C) 2016 Shawn Lin <shawn.lin@rock-chips.com>
  6. * Copyright (C) 2016 ROCKCHIP, Inc.
  7. */
  8. #include <linux/clk.h>
  9. #include <linux/delay.h>
  10. #include <linux/io.h>
  11. #include <linux/mfd/syscon.h>
  12. #include <linux/module.h>
  13. #include <linux/of.h>
  14. #include <linux/phy/phy.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/property.h>
  17. #include <linux/regmap.h>
  18. #include <linux/reset.h>
/*
 * The higher 16-bit of this register is used for write protection
 * only if BIT(x + 16) set to 1 the BIT(x) can be written.
 */
#define HIWORD_UPDATE(val, mask, shift) \
	((val) << (shift) | (mask) << ((shift) + 16))

/* Number of per-lane struct phy instances created in probe(). */
#define PHY_MAX_LANE_NUM 4

/* Field layout of the GRF "conf" register written by phy_wr_cfg(). */
#define PHY_CFG_DATA_SHIFT 7
#define PHY_CFG_ADDR_SHIFT 1
#define PHY_CFG_DATA_MASK 0xf
#define PHY_CFG_ADDR_MASK 0x3f
#define PHY_CFG_RD_MASK 0x3ff
/*
 * NOTE(review): enable and disable share the value 1; phy_wr_cfg()
 * writes ENABLE then DISABLE back-to-back, so the write strobe looks
 * edge/pulse-triggered — confirm against the RK3399 TRM.
 */
#define PHY_CFG_WR_ENABLE 1
#define PHY_CFG_WR_DISABLE 1
#define PHY_CFG_WR_SHIFT 0
#define PHY_CFG_WR_MASK 1
/*
 * Internal PHY config addresses/values used during power-on.
 * PHY_CFG_PLL_LOCK and PHY_CFG_CLK_TEST are both 0x10 — presumably the
 * same internal address serving read (lock status) and write (clock
 * test) roles; verify against the TRM.
 */
#define PHY_CFG_PLL_LOCK 0x10
#define PHY_CFG_CLK_TEST 0x10
#define PHY_CFG_CLK_SCC 0x12
#define PHY_CFG_SEPE_RATE BIT(3)
#define PHY_CFG_PLL_100M BIT(3)
/* Bits read back from the GRF status register. */
#define PHY_PLL_LOCKED BIT(9)
#define PHY_PLL_OUTPUT BIT(10)
/* Per-lane status register addresses (not referenced in this file). */
#define PHY_LANE_A_STATUS 0x30
#define PHY_LANE_B_STATUS 0x31
#define PHY_LANE_C_STATUS 0x32
#define PHY_LANE_D_STATUS 0x33
#define PHY_LANE_RX_DET_SHIFT 11
#define PHY_LANE_RX_DET_TH 0x1
/* Lane-idle control: one bit per lane starting at the lane-A shift. */
#define PHY_LANE_IDLE_OFF 0x1
#define PHY_LANE_IDLE_MASK 0x1
#define PHY_LANE_IDLE_A_SHIFT 3
#define PHY_LANE_IDLE_B_SHIFT 4
#define PHY_LANE_IDLE_C_SHIFT 5
#define PHY_LANE_IDLE_D_SHIFT 6
/* Per-SoC GRF register offsets for the PCIe PHY control interface. */
struct rockchip_pcie_data {
	unsigned int pcie_conf;		/* config (address/data/strobe) register */
	unsigned int pcie_status;	/* PLL lock/output status register */
	unsigned int pcie_laneoff;	/* per-lane idle control register */
};
/* Driver state: one shared PHY block exposing up to four lane phys. */
struct rockchip_pcie_phy {
	const struct rockchip_pcie_data *phy_data;	/* SoC match data */
	struct regmap *reg_base;			/* parent GRF syscon */
	/* One struct phy per lane; index maps back via to_pcie_phy(). */
	struct phy_pcie_instance {
		struct phy *phy;
		u32 index;
	} phys[PHY_MAX_LANE_NUM];
	struct mutex pcie_mutex;	/* guards the refcounts and hw sequences */
	struct reset_control *phy_rst;
	struct clk *clk_pciephy_ref;
	int pwr_cnt;	/* power_on/power_off reference count */
	int init_cnt;	/* init/exit reference count */
};
/* Map a per-lane instance back to its containing rockchip_pcie_phy. */
static struct rockchip_pcie_phy *to_pcie_phy(struct phy_pcie_instance *inst)
{
	return container_of(inst, struct rockchip_pcie_phy,
			    phys[inst->index]);
}
  77. static struct phy *rockchip_pcie_phy_of_xlate(struct device *dev,
  78. const struct of_phandle_args *args)
  79. {
  80. struct rockchip_pcie_phy *rk_phy = dev_get_drvdata(dev);
  81. if (args->args_count == 0)
  82. return rk_phy->phys[0].phy;
  83. if (WARN_ON(args->args[0] >= PHY_MAX_LANE_NUM))
  84. return ERR_PTR(-ENODEV);
  85. return rk_phy->phys[args->args[0]].phy;
  86. }
  87. static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
  88. u32 addr, u32 data)
  89. {
  90. regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
  91. HIWORD_UPDATE(data,
  92. PHY_CFG_DATA_MASK,
  93. PHY_CFG_DATA_SHIFT) |
  94. HIWORD_UPDATE(addr,
  95. PHY_CFG_ADDR_MASK,
  96. PHY_CFG_ADDR_SHIFT));
  97. udelay(1);
  98. regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
  99. HIWORD_UPDATE(PHY_CFG_WR_ENABLE,
  100. PHY_CFG_WR_MASK,
  101. PHY_CFG_WR_SHIFT));
  102. udelay(1);
  103. regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
  104. HIWORD_UPDATE(PHY_CFG_WR_DISABLE,
  105. PHY_CFG_WR_MASK,
  106. PHY_CFG_WR_SHIFT));
  107. }
/*
 * Power off one lane: idle the lane unconditionally, and once the
 * last powered user drops away, assert the shared PHY reset.
 *
 * Returns 0 on success or the reset_control_assert() error, after
 * rolling back the refcount and un-idling the lane.
 */
static int rockchip_pcie_phy_power_off(struct phy *phy)
{
	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
	int err = 0;

	mutex_lock(&rk_phy->pcie_mutex);

	/* This lane goes idle regardless of the remaining refcount. */
	regmap_write(rk_phy->reg_base,
		     rk_phy->phy_data->pcie_laneoff,
		     HIWORD_UPDATE(PHY_LANE_IDLE_OFF,
				   PHY_LANE_IDLE_MASK,
				   PHY_LANE_IDLE_A_SHIFT + inst->index));

	/* Other lanes still powered: keep the PHY out of reset. */
	if (--rk_phy->pwr_cnt)
		goto err_out;

	err = reset_control_assert(rk_phy->phy_rst);
	if (err) {
		dev_err(&phy->dev, "assert phy_rst err %d\n", err);
		goto err_restore;
	}

err_out:
	mutex_unlock(&rk_phy->pcie_mutex);
	return 0;

err_restore:
	/* Roll back: restore the refcount and take the lane out of idle. */
	rk_phy->pwr_cnt++;
	regmap_write(rk_phy->reg_base,
		     rk_phy->phy_data->pcie_laneoff,
		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
				   PHY_LANE_IDLE_MASK,
				   PHY_LANE_IDLE_A_SHIFT + inst->index));
	mutex_unlock(&rk_phy->pcie_mutex);
	return err;
}
  139. static int rockchip_pcie_phy_power_on(struct phy *phy)
  140. {
  141. struct phy_pcie_instance *inst = phy_get_drvdata(phy);
  142. struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
  143. int err = 0;
  144. u32 status;
  145. unsigned long timeout;
  146. mutex_lock(&rk_phy->pcie_mutex);
  147. if (rk_phy->pwr_cnt++)
  148. goto err_out;
  149. err = reset_control_deassert(rk_phy->phy_rst);
  150. if (err) {
  151. dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
  152. goto err_pwr_cnt;
  153. }
  154. regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
  155. HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
  156. PHY_CFG_ADDR_MASK,
  157. PHY_CFG_ADDR_SHIFT));
  158. regmap_write(rk_phy->reg_base,
  159. rk_phy->phy_data->pcie_laneoff,
  160. HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
  161. PHY_LANE_IDLE_MASK,
  162. PHY_LANE_IDLE_A_SHIFT + inst->index));
  163. /*
  164. * No documented timeout value for phy operation below,
  165. * so we make it large enough here. And we use loop-break
  166. * method which should not be harmful.
  167. */
  168. timeout = jiffies + msecs_to_jiffies(1000);
  169. err = -EINVAL;
  170. while (time_before(jiffies, timeout)) {
  171. regmap_read(rk_phy->reg_base,
  172. rk_phy->phy_data->pcie_status,
  173. &status);
  174. if (status & PHY_PLL_LOCKED) {
  175. dev_dbg(&phy->dev, "pll locked!\n");
  176. err = 0;
  177. break;
  178. }
  179. msleep(20);
  180. }
  181. if (err) {
  182. dev_err(&phy->dev, "pll lock timeout!\n");
  183. goto err_pll_lock;
  184. }
  185. phy_wr_cfg(rk_phy, PHY_CFG_CLK_TEST, PHY_CFG_SEPE_RATE);
  186. phy_wr_cfg(rk_phy, PHY_CFG_CLK_SCC, PHY_CFG_PLL_100M);
  187. err = -ETIMEDOUT;
  188. while (time_before(jiffies, timeout)) {
  189. regmap_read(rk_phy->reg_base,
  190. rk_phy->phy_data->pcie_status,
  191. &status);
  192. if (!(status & PHY_PLL_OUTPUT)) {
  193. dev_dbg(&phy->dev, "pll output enable done!\n");
  194. err = 0;
  195. break;
  196. }
  197. msleep(20);
  198. }
  199. if (err) {
  200. dev_err(&phy->dev, "pll output enable timeout!\n");
  201. goto err_pll_lock;
  202. }
  203. regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
  204. HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
  205. PHY_CFG_ADDR_MASK,
  206. PHY_CFG_ADDR_SHIFT));
  207. err = -EINVAL;
  208. while (time_before(jiffies, timeout)) {
  209. regmap_read(rk_phy->reg_base,
  210. rk_phy->phy_data->pcie_status,
  211. &status);
  212. if (status & PHY_PLL_LOCKED) {
  213. dev_dbg(&phy->dev, "pll relocked!\n");
  214. err = 0;
  215. break;
  216. }
  217. msleep(20);
  218. }
  219. if (err) {
  220. dev_err(&phy->dev, "pll relock timeout!\n");
  221. goto err_pll_lock;
  222. }
  223. err_out:
  224. mutex_unlock(&rk_phy->pcie_mutex);
  225. return 0;
  226. err_pll_lock:
  227. reset_control_assert(rk_phy->phy_rst);
  228. err_pwr_cnt:
  229. rk_phy->pwr_cnt--;
  230. mutex_unlock(&rk_phy->pcie_mutex);
  231. return err;
  232. }
  233. static int rockchip_pcie_phy_init(struct phy *phy)
  234. {
  235. struct phy_pcie_instance *inst = phy_get_drvdata(phy);
  236. struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
  237. int err = 0;
  238. mutex_lock(&rk_phy->pcie_mutex);
  239. if (rk_phy->init_cnt++)
  240. goto err_out;
  241. err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
  242. if (err) {
  243. dev_err(&phy->dev, "Fail to enable pcie ref clock.\n");
  244. goto err_refclk;
  245. }
  246. err = reset_control_assert(rk_phy->phy_rst);
  247. if (err) {
  248. dev_err(&phy->dev, "assert phy_rst err %d\n", err);
  249. goto err_reset;
  250. }
  251. err_out:
  252. mutex_unlock(&rk_phy->pcie_mutex);
  253. return 0;
  254. err_reset:
  255. clk_disable_unprepare(rk_phy->clk_pciephy_ref);
  256. err_refclk:
  257. rk_phy->init_cnt--;
  258. mutex_unlock(&rk_phy->pcie_mutex);
  259. return err;
  260. }
  261. static int rockchip_pcie_phy_exit(struct phy *phy)
  262. {
  263. struct phy_pcie_instance *inst = phy_get_drvdata(phy);
  264. struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
  265. mutex_lock(&rk_phy->pcie_mutex);
  266. if (--rk_phy->init_cnt)
  267. goto err_init_cnt;
  268. clk_disable_unprepare(rk_phy->clk_pciephy_ref);
  269. err_init_cnt:
  270. mutex_unlock(&rk_phy->pcie_mutex);
  271. return 0;
  272. }
/* Generic PHY framework callbacks, shared by every lane's phy. */
static const struct phy_ops ops = {
	.init = rockchip_pcie_phy_init,
	.exit = rockchip_pcie_phy_exit,
	.power_on = rockchip_pcie_phy_power_on,
	.power_off = rockchip_pcie_phy_power_off,
	.owner = THIS_MODULE,
};
/* RK3399 GRF offsets for the PCIe PHY registers. */
static const struct rockchip_pcie_data rk3399_pcie_data = {
	.pcie_conf = 0xe220,
	.pcie_status = 0xe2a4,
	.pcie_laneoff = 0xe214,
};
/* Device-tree match table; .data supplies the per-SoC GRF offsets. */
static const struct of_device_id rockchip_pcie_phy_dt_ids[] = {
	{
		.compatible = "rockchip,rk3399-pcie-phy",
		.data = &rk3399_pcie_data,
	},
	{}
};

MODULE_DEVICE_TABLE(of, rockchip_pcie_phy_dt_ids);
/*
 * Probe: look up the parent GRF syscon, grab the "phy" reset and
 * "refclk" clock, create one struct phy per lane (or a single phy for
 * the legacy zero-cell binding) and register the provider.
 *
 * Returns 0 on success or a negative errno; all resources are
 * devm-managed.
 */
static int rockchip_pcie_phy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie_phy *rk_phy;
	struct phy_provider *phy_provider;
	struct regmap *grf;
	int i;
	u32 phy_num;

	/* The PHY node sits under the GRF syscon node in DT. */
	grf = syscon_node_to_regmap(dev->parent->of_node);
	if (IS_ERR(grf)) {
		dev_err(dev, "Cannot find GRF syscon\n");
		return PTR_ERR(grf);
	}

	rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
	if (!rk_phy)
		return -ENOMEM;

	rk_phy->phy_data = device_get_match_data(&pdev->dev);
	if (!rk_phy->phy_data)
		return -EINVAL;

	rk_phy->reg_base = grf;

	mutex_init(&rk_phy->pcie_mutex);

	rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
	if (IS_ERR(rk_phy->phy_rst)) {
		/* Stay quiet on probe deferral; it is not an error. */
		if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
			dev_err(dev,
				"missing phy property for reset controller\n");
		return PTR_ERR(rk_phy->phy_rst);
	}

	rk_phy->clk_pciephy_ref = devm_clk_get(dev, "refclk");
	if (IS_ERR(rk_phy->clk_pciephy_ref)) {
		dev_err(dev, "refclk not found.\n");
		return PTR_ERR(rk_phy->clk_pciephy_ref);
	}

	/* parse #phy-cells to see if it's legacy PHY model */
	if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num))
		return -ENOENT;

	/* #phy-cells == 0: legacy single-phy binding; else one per lane. */
	phy_num = (phy_num == 0) ? 1 : PHY_MAX_LANE_NUM;
	dev_dbg(dev, "phy number is %d\n", phy_num);

	for (i = 0; i < phy_num; i++) {
		rk_phy->phys[i].phy = devm_phy_create(dev, dev->of_node, &ops);
		if (IS_ERR(rk_phy->phys[i].phy)) {
			dev_err(dev, "failed to create PHY%d\n", i);
			return PTR_ERR(rk_phy->phys[i].phy);
		}
		rk_phy->phys[i].index = i;
		phy_set_drvdata(rk_phy->phys[i].phy, &rk_phy->phys[i]);
	}

	platform_set_drvdata(pdev, rk_phy);
	phy_provider = devm_of_phy_provider_register(dev,
					rockchip_pcie_phy_of_xlate);

	return PTR_ERR_OR_ZERO(phy_provider);
}
/* Platform driver glue; matching is DT-only via the table above. */
static struct platform_driver rockchip_pcie_driver = {
	.probe = rockchip_pcie_phy_probe,
	.driver = {
		.name = "rockchip-pcie-phy",
		.of_match_table = rockchip_pcie_phy_dt_ids,
	},
};

module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip PCIe PHY driver");
MODULE_LICENSE("GPL v2");