  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * (C) Copyright 2017 Theobroma Systems Design und Consulting GmbH
  4. */
  5. #include <common.h>
  6. #include <clk.h>
  7. #include <dm.h>
  8. #include <dt-bindings/memory/rk3368-dmc.h>
  9. #include <dt-structs.h>
  10. #include <ram.h>
  11. #include <regmap.h>
  12. #include <syscon.h>
  13. #include <asm/io.h>
  14. #include <asm/arch/clock.h>
  15. #include <asm/arch/cru_rk3368.h>
  16. #include <asm/arch/grf_rk3368.h>
  17. #include <asm/arch/ddr_rk3368.h>
  18. #include <asm/arch/sdram.h>
  19. #include <asm/arch/sdram_common.h>
/*
 * Runtime state of the DMC driver: resolved register blocks, the DDR
 * clock handle, and the final RAM base/size reported via the RAM uclass.
 */
struct dram_info {
	struct ram_info info;		/* base/size reported by get_info() */
	struct clk ddr_clk;		/* DDR clock (id CLK_DDR) */
	struct rk3368_cru *cru;		/* CRU: used for PCTL/PHY soft-resets */
	struct rk3368_grf *grf;		/* GRF: ddrc0_con0 control bits */
	struct rk3368_ddr_pctl *pctl;	/* DDR protocol controller registers */
	struct rk3368_ddrphy *phy;	/* DDR PHY registers */
	struct rk3368_pmu_grf *pmugrf;	/* PMU GRF: os_reg[] scratch registers */
	struct rk3368_msch *msch;	/* memory scheduler (NIU) registers */
};
/*
 * Per-device configuration: devicetree-provided parameters plus the
 * timings and channel geometry computed/detected during init.
 */
struct rk3368_sdram_params {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_rockchip_rk3368_dmc of_plat;	/* dtoc-generated platdata */
#endif
	struct rk3288_sdram_pctl_timing pctl_timing; /* computed PCTL timings */
	u32 trefi_mem_ddr3;	/* refresh interval in tCK (see pctl_calc_timings) */
	struct rk3288_sdram_channel chan; /* detected geometry (col/row/bw/...) */
	struct regmap *map;	/* register map (only without OF_PLATDATA) */
	u32 ddr_freq;		/* target DRAM frequency in Hz (from DT) */
	u32 memory_schedule;	/* DMC_MSCH_* address-mapping scheme (from DT) */
	u32 ddr_speed_bin;	/* DDR3 speed-bin, e.g. DDR3_1600K (from DT) */
	u32 tfaw_mult;		/* TFAW_TRRD_MULT* value for PCTL_MCFG */
};
/* PCTL bits (register field encodings; see the RK3368 TRM, DDR PCTL) */
enum {
	/* PCTL_DFISTCFG0 */
	DFI_INIT_START = BIT(0),
	DFI_DATA_BYTE_DISABLE_EN = BIT(2),
	/* PCTL_DFISTCFG1 */
	DFI_DRAM_CLK_SR_EN = BIT(0),
	DFI_DRAM_CLK_DPD_EN = BIT(1),
	ODT_LEN_BL8_W_SHIFT = 16,
	/* PCTL_DFISTCFG2 */
	DFI_PARITY_INTR_EN = BIT(0),
	DFI_PARITY_EN = BIT(1),
	/* PCTL_DFILPCFG0 */
	TLP_RESP_TIME_SHIFT = 16,
	LP_SR_EN = BIT(8),
	LP_PD_EN = BIT(0),
	/* PCTL_DFIODTCFG */
	RANK0_ODT_WRITE_SEL = BIT(3),
	RANK1_ODT_WRITE_SEL = BIT(11),
	/* PCTL_SCFG */
	HW_LOW_POWER_EN = BIT(0),
	/* PCTL_MCMD: rank-select bits, START bit and command opcodes */
	START_CMD = BIT(31),
	MCMD_RANK0 = BIT(20),
	MCMD_RANK1 = BIT(21),
	DESELECT_CMD = 0,
	PREA_CMD,
	REF_CMD,
	MRS_CMD,
	ZQCS_CMD,
	ZQCL_CMD,
	RSTL_CMD,
	MRR_CMD = 8,
	DPDE_CMD,
	/* PCTL_POWCTL */
	POWER_UP_START = BIT(0),
	/* PCTL_POWSTAT */
	POWER_UP_DONE = BIT(0),
	/* PCTL_SCTL: state-transition requests */
	INIT_STATE = 0,
	CFG_STATE,
	GO_STATE,
	SLEEP_STATE,
	WAKEUP_STATE,
	/* PCTL_STAT: low-power trigger field and controller state field */
	LP_TRIG_SHIFT = 4,
	LP_TRIG_MASK = 7,
	PCTL_STAT_MSK = 7,
	INIT_MEM = 0,
	CONFIG,
	CONFIG_REQ,
	ACCESS,
	ACCESS_REQ,
	LOW_POWER,
	LOW_POWER_ENTRY_REQ,
	LOW_POWER_EXIT_REQ,
	/* PCTL_MCFG */
	DDR2_DDR3_BL_8 = BIT(0),
	DDR3_EN = BIT(5),
	TFAW_TRRD_MULT4 = (0 << 18),	/* tFAW = 4 x tRRD */
	TFAW_TRRD_MULT5 = (1 << 18),	/* tFAW = 5 x tRRD */
	TFAW_TRRD_MULT6 = (2 << 18),	/* tFAW = 6 x tRRD */
};
  106. #define DDR3_MR0_WR(n) \
  107. ((n <= 8) ? ((n - 4) << 9) : (((n >> 1) & 0x7) << 9))
  108. #define DDR3_MR0_CL(n) \
  109. ((((n - 4) & 0x7) << 4) | (((n - 4) & 0x8) >> 2))
  110. #define DDR3_MR0_BL8 \
  111. (0 << 0)
  112. #define DDR3_MR0_DLL_RESET \
  113. (1 << 8)
  114. #define DDR3_MR1_RTT120OHM \
  115. ((0 << 9) | (1 << 6) | (0 << 2))
  116. #define DDR3_MR2_TWL(n) \
  117. (((n - 5) & 0x7) << 3)
  118. #ifdef CONFIG_TPL_BUILD
  119. static void ddr_set_noc_spr_err_stall(struct rk3368_grf *grf, bool enable)
  120. {
  121. if (enable)
  122. rk_setreg(&grf->ddrc0_con0, NOC_RSP_ERR_STALL);
  123. else
  124. rk_clrreg(&grf->ddrc0_con0, NOC_RSP_ERR_STALL);
  125. }
  126. static void ddr_set_ddr3_mode(struct rk3368_grf *grf, bool ddr3_mode)
  127. {
  128. if (ddr3_mode)
  129. rk_setreg(&grf->ddrc0_con0, MSCH0_MAINDDR3_DDR3);
  130. else
  131. rk_clrreg(&grf->ddrc0_con0, MSCH0_MAINDDR3_DDR3);
  132. }
/*
 * Program the DDR PHY for DDR3 operation: mode select, latency values,
 * drive-strength, per-lane NRCOMP/PRCOMP, and write-leveling bypass.
 *
 * @phy:  PHY register block
 * @tcl:  CAS latency in tCK
 * @tal:  additive latency in tCK
 * @tcwl: CAS write latency in tCK
 */
static void ddrphy_config(struct rk3368_ddrphy *phy,
			  u32 tcl, u32 tal, u32 tcwl)
{
	int i;

	/* Set to DDR3 mode */
	clrsetbits_le32(&phy->reg[1], 0x3, 0x0);

	/* DDRPHY_REGB: CL, AL */
	clrsetbits_le32(&phy->reg[0xb], 0xff, tcl << 4 | tal);
	/* DDRPHY_REGC: CWL */
	clrsetbits_le32(&phy->reg[0xc], 0x0f, tcwl);

	/* Update drive-strength */
	writel(0xcc, &phy->reg[0x11]);
	writel(0xaa, &phy->reg[0x16]);
	/*
	 * Update NRCOMP/PRCOMP for all 4 channels (for details of all
	 * affected registers refer to the documentation of DDRPHY_REG20
	 * and DDRPHY_REG21 in the RK3368 TRM.
	 */
	for (i = 0; i < 4; ++i) {
		writel(0xcc, &phy->reg[0x20 + i * 0x10]);
		writel(0x44, &phy->reg[0x21 + i * 0x10]);
	}

	/* Enable write-leveling calibration bypass */
	setbits_le32(&phy->reg[2], BIT(3));
}
  158. static void copy_to_reg(u32 *dest, const u32 *src, u32 n)
  159. {
  160. int i;
  161. for (i = 0; i < n / sizeof(u32); i++)
  162. writel(*src++, dest++);
  163. }
/*
 * Issue a command through the PCTL MCMD register and busy-wait (no
 * timeout) until the controller clears the START_CMD bit, i.e. the
 * command has been accepted/completed.
 *
 * @rank: MCMD_RANK0/MCMD_RANK1 mask selecting the target rank(s)
 * @cmd:  one of the *_CMD opcodes
 */
static void send_command(struct rk3368_ddr_pctl *pctl, u32 rank, u32 cmd)
{
	u32 mcmd = START_CMD | cmd | rank;

	debug("%s: writing %x to MCMD\n", __func__, mcmd);

	writel(mcmd, &pctl->mcmd);
	while (readl(&pctl->mcmd) & START_CMD)
		/* spin */;
}
/*
 * Issue a mode-register-set (MRS) command through PCTL MCMD: the
 * MR number is placed at bit 17 and the MR value at bit 4 of MCMD.
 * Busy-waits (no timeout) until START_CMD clears.
 */
static void send_mrs(struct rk3368_ddr_pctl *pctl,
		     u32 rank, u32 mr_num, u32 mr_data)
{
	u32 mcmd = START_CMD | MRS_CMD | rank | (mr_num << 17) | (mr_data << 4);

	debug("%s: writing %x to MCMD\n", __func__, mcmd);

	writel(mcmd, &pctl->mcmd);
	while (readl(&pctl->mcmd) & START_CMD)
		/* spin */;
}
/*
 * Power up the DRAM and run the DDR3 initialization sequence
 * (DESELECT, PREA, MR2/MR3/MR1/MR0, ZQCL) on both ranks.
 *
 * Returns 0 on success, -ETIME if power-up does not complete within
 * 500 ms.
 */
static int memory_init(struct rk3368_ddr_pctl *pctl,
		       struct rk3368_sdram_params *params)
{
	u32 mr[4];
	const ulong timeout_ms = 500;
	ulong tmp;

	/*
	 * Power up DRAM by DDR_PCTL_POWCTL[0] register of PCTL and
	 * wait power up DRAM finish with DDR_PCTL_POWSTAT[0] register
	 * of PCTL.
	 */
	writel(POWER_UP_START, &pctl->powctl);

	tmp = get_timer(0);
	do {
		if (get_timer(tmp) > timeout_ms) {
			pr_err("%s: POWER_UP_START did not complete in %ld ms\n",
			       __func__, timeout_ms);
			return -ETIME;
		}
	} while (!(readl(&pctl->powstat) & POWER_UP_DONE));

	/* Configure MR0 through MR3 */
	mr[0] = DDR3_MR0_WR(params->pctl_timing.twr) |
		DDR3_MR0_CL(params->pctl_timing.tcl) |
		DDR3_MR0_DLL_RESET;
	mr[1] = DDR3_MR1_RTT120OHM;
	mr[2] = DDR3_MR2_TWL(params->pctl_timing.tcwl);
	mr[3] = 0;

	/*
	 * Also see RK3368 Technical Reference Manual:
	 *   "16.6.2 Initialization (DDR3 Initialization Sequence)"
	 */
	send_command(pctl, MCMD_RANK0 | MCMD_RANK1, DESELECT_CMD);
	udelay(1);
	send_command(pctl, MCMD_RANK0 | MCMD_RANK1, PREA_CMD);
	send_mrs(pctl, MCMD_RANK0 | MCMD_RANK1, 2, mr[2]);
	send_mrs(pctl, MCMD_RANK0 | MCMD_RANK1, 3, mr[3]);
	send_mrs(pctl, MCMD_RANK0 | MCMD_RANK1, 1, mr[1]);
	send_mrs(pctl, MCMD_RANK0 | MCMD_RANK1, 0, mr[0]);
	send_command(pctl, MCMD_RANK0 | MCMD_RANK1, ZQCL_CMD);

	return 0;
}
/*
 * Drive the PCTL state-machine into the CONFIG state, going through
 * WAKEUP first when currently in LOW_POWER.  Busy-waits on PCTL_STAT
 * without a timeout.
 */
static void move_to_config_state(struct rk3368_ddr_pctl *pctl)
{
	/*
	 * Also see RK3368 Technical Reference Manual:
	 *   "16.6.1 State transition of PCTL (Moving to Config State)"
	 */
	u32 state = readl(&pctl->stat) & PCTL_STAT_MSK;

	switch (state) {
	case LOW_POWER:
		writel(WAKEUP_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) != ACCESS)
			/* spin */;

		/* fall-through: LOW_POWER wakes up into ACCESS first */
	case ACCESS:
	case INIT_MEM:
		writel(CFG_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) != CONFIG)
			/* spin */;
		break;

	case CONFIG:
		return;

	default:
		break;
	}
}
/*
 * Drive the PCTL state-machine into the ACCESS state, walking through
 * the intermediate states (LOW_POWER -> ACCESS-wakeup -> CONFIG -> GO)
 * as required.  Busy-waits on PCTL_STAT without a timeout.
 *
 * If the controller entered LOW_POWER through the hardware trigger
 * (LP_TRIG field == 1), no software transition is performed.
 */
static void move_to_access_state(struct rk3368_ddr_pctl *pctl)
{
	/*
	 * Also see RK3368 Technical Reference Manual:
	 *   "16.6.1 State transition of PCTL (Moving to Access State)"
	 */
	u32 state = readl(&pctl->stat) & PCTL_STAT_MSK;

	switch (state) {
	case LOW_POWER:
		if (((readl(&pctl->stat) >> LP_TRIG_SHIFT) &
		     LP_TRIG_MASK) == 1)
			return;

		writel(WAKEUP_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) != ACCESS)
			/* spin */;

		/* fall-through */
	case INIT_MEM:
		writel(CFG_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) != CONFIG)
			/* spin */;

		/* fall-through */
	case CONFIG:
		writel(GO_STATE, &pctl->sctl);
		while ((readl(&pctl->stat) & PCTL_STAT_MSK) == CONFIG)
			/* spin */;
		break;

	case ACCESS:
		return;

	default:
		break;
	}
}
/*
 * Pulse the DDR controller (PCTL) and PHY soft-resets via CRU
 * softrst_con[10]: assert both, then release the PHY first and the
 * PCTL afterwards, with the vendor-specified delays in between.
 */
static void ddrctl_reset(struct rk3368_cru *cru)
{
	const u32 ctl_reset = BIT(3) | BIT(2);
	const u32 phy_reset = BIT(1) | BIT(0);

	/*
	 * The PHY reset should be released before the PCTL reset.
	 *
	 * Note that the following sequence (including the number of
	 * us to delay between releasing the PHY and PCTL reset) has
	 * been adapted per feedback received from Rockchips, so do
	 * not try to optimise.
	 */
	rk_setreg(&cru->softrst_con[10], ctl_reset | phy_reset);
	udelay(1);
	rk_clrreg(&cru->softrst_con[10], phy_reset);
	udelay(5);
	rk_clrreg(&cru->softrst_con[10], ctl_reset);
}
/*
 * Reset the DDR PHY through its own register interface (reg[0] bits 2
 * and 3): assert both resets, release the analog part (bit 2) first,
 * then the digital part (bit 3) after the required settling time.
 */
static void ddrphy_reset(struct rk3368_ddrphy *ddrphy)
{
	/*
	 * The analog part of the PHY should be release at least 1000
	 * DRAM cycles before the digital part of the PHY (waiting for
	 * 5us will ensure this for a DRAM clock as low as 200MHz).
	 */
	clrbits_le32(&ddrphy->reg[0], BIT(3) | BIT(2));
	udelay(1);
	setbits_le32(&ddrphy->reg[0], BIT(2));
	udelay(5);
	setbits_le32(&ddrphy->reg[0], BIT(3));
}
/*
 * Set up the PHY's frequency-dependent delay configuration (per-lane
 * bits and the DQS DLL delay) for the given DRAM frequency in Hz.
 *
 * NOTE(review): the individual register offsets/bits follow the
 * Rockchip init sequence; confirm exact semantics against the RK3368
 * TRM (DDRPHY register chapter).
 */
static void ddrphy_config_delays(struct rk3368_ddrphy *ddrphy, u32 freq)
{
	u32 dqs_dll_delay;

	setbits_le32(&ddrphy->reg[0x13], BIT(4));
	clrbits_le32(&ddrphy->reg[0x14], BIT(3));

	setbits_le32(&ddrphy->reg[0x26], BIT(4));
	clrbits_le32(&ddrphy->reg[0x27], BIT(3));

	setbits_le32(&ddrphy->reg[0x36], BIT(4));
	clrbits_le32(&ddrphy->reg[0x37], BIT(3));

	setbits_le32(&ddrphy->reg[0x46], BIT(4));
	clrbits_le32(&ddrphy->reg[0x47], BIT(3));

	setbits_le32(&ddrphy->reg[0x56], BIT(4));
	clrbits_le32(&ddrphy->reg[0x57], BIT(3));

	if (freq <= 400000000)
		setbits_le32(&ddrphy->reg[0xa4], 0x1f);
	else
		clrbits_le32(&ddrphy->reg[0xa4], 0x1f);

	/* DQS DLL delay depends on the operating frequency */
	if (freq < 681000000)
		dqs_dll_delay = 3; /* 67.5 degree delay */
	else
		dqs_dll_delay = 2; /* 45 degree delay */

	writel(dqs_dll_delay, &ddrphy->reg[0x28]);
	writel(dqs_dll_delay, &ddrphy->reg[0x38]);
	writel(dqs_dll_delay, &ddrphy->reg[0x48]);
	writel(dqs_dll_delay, &ddrphy->reg[0x58]);
}
/*
 * Configure the DFI (DDR PHY interface) registers of the PCTL and
 * start DFI initialisation, waiting for the PHY to signal completion
 * in DFISTSTAT0[0].
 *
 * Returns 0 on success, -ETIME if DFI init does not complete within
 * 200 ms.
 */
static int dfi_cfg(struct rk3368_ddr_pctl *pctl)
{
	const ulong timeout_ms = 200;
	ulong tmp;

	writel(DFI_DATA_BYTE_DISABLE_EN, &pctl->dfistcfg0);

	writel(DFI_DRAM_CLK_SR_EN | DFI_DRAM_CLK_DPD_EN,
	       &pctl->dfistcfg1);
	writel(DFI_PARITY_INTR_EN | DFI_PARITY_EN, &pctl->dfistcfg2);
	writel(7 << TLP_RESP_TIME_SHIFT | LP_SR_EN | LP_PD_EN,
	       &pctl->dfilpcfg0);

	writel(1, &pctl->dfitphyupdtype0);

	writel(0x1f, &pctl->dfitphyrdlat);
	writel(0, &pctl->dfitphywrdata);

	writel(0, &pctl->dfiupdcfg);  /* phyupd and ctrlupd disabled */

	setbits_le32(&pctl->dfistcfg0, DFI_INIT_START);

	tmp = get_timer(0);
	do {
		if (get_timer(tmp) > timeout_ms) {
			pr_err("%s: DFI init did not complete within %ld ms\n",
			       __func__, timeout_ms);
			return -ETIME;
		}
	} while ((readl(&pctl->dfiststat0) & 1) == 0);

	return 0;
}
  361. static inline u32 ps_to_tCK(const u32 ps, const ulong freq)
  362. {
  363. const ulong MHz = 1000000;
  364. return DIV_ROUND_UP(ps * freq, 1000000 * MHz);
  365. }
  366. static inline u32 ns_to_tCK(const u32 ns, const ulong freq)
  367. {
  368. return ps_to_tCK(ns * 1000, freq);
  369. }
  370. static inline u32 tCK_to_ps(const ulong tCK, const ulong freq)
  371. {
  372. const ulong MHz = 1000000;
  373. return DIV_ROUND_UP(tCK * 1000000 * MHz, freq);
  374. }
/*
 * Compute the PCTL timing parameters (in tCK units, unless noted) for
 * the requested DRAM frequency in Hz.  Only the DDR3-1600K speed-bin
 * is implemented; the values follow JESD79-3 for that bin.
 *
 * Returns 0 on success, -1 for an unsupported speed-bin.
 */
static int pctl_calc_timings(struct rk3368_sdram_params *params,
			     ulong freq)
{
	struct rk3288_sdram_pctl_timing *pctl_timing = &params->pctl_timing;
	const ulong MHz = 1000000;
	u32 tccd;
	u32 tfaw_as_ps;

	if (params->ddr_speed_bin != DDR3_1600K) {
		pr_err("%s: unimplemented DDR3 speed bin %d\n",
		       __func__, params->ddr_speed_bin);
		return -1;
	}

	/* PCTL is clocked at 1/2 the DRAM clock; err on the side of caution */
	pctl_timing->togcnt1u = DIV_ROUND_UP(freq, 2 * MHz);
	pctl_timing->togcnt100n = DIV_ROUND_UP(freq / 10, 2 * MHz);

	pctl_timing->tinit = 200;   /* 200 usec */
	pctl_timing->trsth = 500;   /* 500 usec */
	pctl_timing->trefi = 78;    /* 7.8usec = 78 * 100ns */
	params->trefi_mem_ddr3 = ns_to_tCK(pctl_timing->trefi * 100, freq);

	/*
	 * CL/CWL selection by operating frequency.
	 * NOTE(review): tcwl = 10 at <= 400MHz looks unusual (CWL larger
	 * than CL) -- confirm against the TRM/datasheet.
	 */
	if (freq <= (400 * MHz)) {
		pctl_timing->tcl = 6;
		pctl_timing->tcwl = 10;
	} else if (freq <= (533 * MHz)) {
		pctl_timing->tcl = 8;
		pctl_timing->tcwl = 6;
	} else if (freq <= (666 * MHz)) {
		pctl_timing->tcl = 10;
		pctl_timing->tcwl = 7;
	} else {
		pctl_timing->tcl = 11;
		pctl_timing->tcwl = 8;
	}

	pctl_timing->tmrd = 4;                     /* 4 tCK (all speed bins) */
	pctl_timing->trfc = ns_to_tCK(350, freq);  /* tRFC: 350 (max) @ 8GBit */
	pctl_timing->trp = max(4u, ps_to_tCK(13750, freq));
	/*
	 * JESD-79:
	 *   READ to WRITE Command Delay = RL + tCCD / 2 + 2tCK - WL
	 */
	tccd = 4;
	pctl_timing->trtw = pctl_timing->tcl + tccd/2 + 2 - pctl_timing->tcwl;
	pctl_timing->tal = 0;
	pctl_timing->tras = ps_to_tCK(35000, freq);
	pctl_timing->trc = ps_to_tCK(48750, freq);
	pctl_timing->trcd = ps_to_tCK(13750, freq);
	pctl_timing->trrd = max(4u, ps_to_tCK(7500, freq));
	pctl_timing->trtp = max(4u, ps_to_tCK(7500, freq));
	pctl_timing->twr = ps_to_tCK(15000, freq);
	/* The DDR3 mode-register does only support even values for tWR > 8. */
	if (pctl_timing->twr > 8)
		pctl_timing->twr = (pctl_timing->twr + 1) & ~1;
	pctl_timing->twtr = max(4u, ps_to_tCK(7500, freq));
	pctl_timing->texsr = 512;                  /* tEXSR(max) is tDLLLK */
	pctl_timing->txp = max(3u, ps_to_tCK(6000, freq));
	pctl_timing->txpdll = max(10u, ps_to_tCK(24000, freq));
	pctl_timing->tzqcs = max(64u, ps_to_tCK(80000, freq));
	pctl_timing->tzqcsi = 10000;               /* as used by Rockchip */
	pctl_timing->tdqs = 1;                     /* fixed for DDR3 */
	pctl_timing->tcksre = max(5u, ps_to_tCK(10000, freq));
	pctl_timing->tcksrx = max(5u, ps_to_tCK(10000, freq));
	pctl_timing->tcke = max(3u, ps_to_tCK(5000, freq));
	pctl_timing->tmod = max(12u, ps_to_tCK(15000, freq));
	pctl_timing->trstl = ns_to_tCK(100, freq);
	pctl_timing->tzqcl = max(256u, ps_to_tCK(320000, freq));  /* tZQoper */
	pctl_timing->tmrr = 0;
	pctl_timing->tckesr = pctl_timing->tcke + 1;  /* JESD-79: tCKE + 1tCK */
	pctl_timing->tdpd = 0;    /* RK3368 TRM: "allowed values for DDR3: 0" */

	/*
	 * The controller can represent tFAW as 4x, 5x or 6x tRRD only.
	 * We want to use the smallest multiplier that satisfies the tFAW
	 * requirements of the given speed-bin.  If necessary, we stretch out
	 * tRRD to allow us to operate on a 6x multiplier for tFAW.
	 */
	tfaw_as_ps = 40000;   /* 40ns: tFAW for DDR3-1600K, 2KB page-size */
	if (tCK_to_ps(pctl_timing->trrd * 6, freq) < tfaw_as_ps) {
		/* If tFAW is > 6 x tRRD, we need to stretch tRRD */
		pctl_timing->trrd = ps_to_tCK(DIV_ROUND_UP(40000, 6), freq);
		params->tfaw_mult = TFAW_TRRD_MULT6;
	} else if (tCK_to_ps(pctl_timing->trrd * 5, freq) < tfaw_as_ps) {
		params->tfaw_mult = TFAW_TRRD_MULT6;
	} else if (tCK_to_ps(pctl_timing->trrd * 4, freq) < tfaw_as_ps) {
		params->tfaw_mult = TFAW_TRRD_MULT5;
	} else {
		params->tfaw_mult = TFAW_TRRD_MULT4;
	}

	return 0;
}
/*
 * Program the PCTL with the precomputed timings plus static ODT/DFI
 * configuration, then enable hardware low-power operation.
 *
 * Note: the whole pctl_timing structure is block-copied into the
 * controller's timing registers starting at togcnt1u.
 */
static void pctl_cfg(struct rk3368_ddr_pctl *pctl,
		     struct rk3368_sdram_params *params,
		     struct rk3368_grf *grf)
{
	/* Configure PCTL timing registers */
	params->pctl_timing.trefi |= BIT(31);  /* see PCTL_TREFI */
	copy_to_reg(&pctl->togcnt1u, &params->pctl_timing.togcnt1u,
		    sizeof(params->pctl_timing));
	writel(params->trefi_mem_ddr3, &pctl->trefi_mem_ddr3);

	/* Set up ODT write selector and ODT write length */
	writel((RANK0_ODT_WRITE_SEL | RANK1_ODT_WRITE_SEL), &pctl->dfiodtcfg);
	writel(7 << ODT_LEN_BL8_W_SHIFT, &pctl->dfiodtcfg1);

	/* Set up the CL/CWL-dependent timings of DFI */
	writel((params->pctl_timing.tcl - 1) / 2 - 1, &pctl->dfitrddataen);
	writel((params->pctl_timing.tcwl - 1) / 2 - 1, &pctl->dfitphywrlat);

	/* DDR3 */
	writel(params->tfaw_mult | DDR3_EN | DDR2_DDR3_BL_8, &pctl->mcfg);
	/* NOTE(review): magic value from the vendor init sequence -- see TRM */
	writel(0x001c0004, &grf->ddrc0_con0);

	setbits_le32(&pctl->scfg, HW_LOW_POWER_EN);
}
/*
 * Run the PHY's automatic data-training with auto-refresh paused
 * (TREFI temporarily forced to 0), waiting for all 4 byte-lanes to
 * report completion in PHY reg[0xff].  Auto-refresh is restored on
 * both success and nominal completion.
 *
 * Returns 0 on success, -ETIME if training does not complete within
 * 500 ms (note: auto-refresh is NOT restored on the timeout path).
 */
static int ddrphy_data_training(struct rk3368_ddr_pctl *pctl,
				struct rk3368_ddrphy *ddrphy)
{
	const u32 trefi = readl(&pctl->trefi);
	const ulong timeout_ms = 500;
	ulong tmp;

	/* disable auto-refresh */
	writel(0 | BIT(31), &pctl->trefi);

	clrsetbits_le32(&ddrphy->reg[2], 0x33, 0x20);
	clrsetbits_le32(&ddrphy->reg[2], 0x33, 0x21);

	tmp = get_timer(0);
	do {
		if (get_timer(tmp) > timeout_ms) {
			pr_err("%s: did not complete within %ld ms\n",
			       __func__, timeout_ms);
			return -ETIME;
		}
	} while ((readl(&ddrphy->reg[0xff]) & 0xf) != 0xf);

	send_command(pctl, MCMD_RANK0 | MCMD_RANK1, PREA_CMD);
	clrsetbits_le32(&ddrphy->reg[2], 0x33, 0x20);

	/* resume auto-refresh */
	writel(trefi | BIT(31), &pctl->trefi);

	return 0;
}
/*
 * Detect the number of column and row address bits by probing for
 * address aliasing: write a test pattern at the address where the
 * candidate bit would wrap, and check that it does not alias back to
 * the base of SDRAM.  Uses ddrconf 6 for the column probe and
 * ddrconf 15 for the row probe.
 *
 * On success, stores the detected geometry in params->chan and
 * returns 0; returns -EINVAL if no valid col/row count is found.
 */
static int sdram_col_row_detect(struct udevice *dev)
{
	struct dram_info *priv = dev_get_priv(dev);
	struct rk3368_sdram_params *params = dev_get_platdata(dev);
	struct rk3368_ddr_pctl *pctl = priv->pctl;
	struct rk3368_msch *msch = priv->msch;
	const u32 test_pattern = 0x5aa5f00f;
	int row, col;
	uintptr_t addr;

	move_to_config_state(pctl);
	writel(6, &msch->ddrconf);
	move_to_access_state(pctl);

	/* Detect col */
	for (col = 11; col >= 9; col--) {
		writel(0, CONFIG_SYS_SDRAM_BASE);
		addr = CONFIG_SYS_SDRAM_BASE +
			(1 << (col + params->chan.bw - 1));
		writel(test_pattern, addr);
		if ((readl(addr) == test_pattern) &&
		    (readl(CONFIG_SYS_SDRAM_BASE) == 0))
			break;
	}
	/* loop falls through with col == 8 when no candidate matched */
	if (col == 8) {
		pr_err("%s: col detect error\n", __func__);
		return -EINVAL;
	}

	move_to_config_state(pctl);
	writel(15, &msch->ddrconf);
	move_to_access_state(pctl);

	/* Detect row*/
	for (row = 16; row >= 12; row--) {
		writel(0, CONFIG_SYS_SDRAM_BASE);
		addr = CONFIG_SYS_SDRAM_BASE + (1 << (row + 15 - 1));
		writel(test_pattern, addr);
		if ((readl(addr) == test_pattern) &&
		    (readl(CONFIG_SYS_SDRAM_BASE) == 0))
			break;
	}
	/* loop falls through with row == 11 when no candidate matched */
	if (row == 11) {
		pr_err("%s: row detect error\n", __func__);
		return -EINVAL;
	}

	/* Record results */
	debug("%s: col %d, row %d\n", __func__, col, row);
	params->chan.col = col;
	params->chan.cs0_row = row;
	params->chan.cs1_row = row;
	params->chan.row_3_4 = 0;

	return 0;
}
/*
 * Select and program the memory-scheduler (NIU) DDRCONF value that
 * matches the detected DRAM geometry (columns/rows) and the requested
 * address-mapping scheme (C-B-R-D / C-R-B-D / C-B-D-R).
 *
 * Returns 0 on success, -EINVAL if no table entry matches.
 */
static int msch_niu_config(struct rk3368_msch *msch,
			   struct rk3368_sdram_params *params)
{
	int i;
	const u8 cols = params->chan.col - ((params->chan.bw == 2) ? 0 : 1);
	const u8 rows = params->chan.cs0_row;

	/*
	 * The DDR address-translation table always assumes a 32bit
	 * bus and the comparison below takes care of adjusting for
	 * a 16bit bus (i.e. one column-address is consumed).
	 */
	const struct {
		u8 rows;
		u8 columns;
		u8 type;
	} ddrconf_table[] = {
		/*
		 * C-B-R-D patterns are first. For these we require an
		 * exact match for the columns and rows (as there's
		 * one entry per possible configuration).
		 */
		[0] =  { .rows = 13, .columns = 10, .type = DMC_MSCH_CBRD },
		[1] =  { .rows = 14, .columns = 10, .type = DMC_MSCH_CBRD },
		[2] =  { .rows = 15, .columns = 10, .type = DMC_MSCH_CBRD },
		[3] =  { .rows = 16, .columns = 10, .type = DMC_MSCH_CBRD },
		[4] =  { .rows = 14, .columns = 11, .type = DMC_MSCH_CBRD },
		[5] =  { .rows = 15, .columns = 11, .type = DMC_MSCH_CBRD },
		[6] =  { .rows = 16, .columns = 11, .type = DMC_MSCH_CBRD },
		[7] =  { .rows = 13, .columns = 9, .type = DMC_MSCH_CBRD },
		[8] =  { .rows = 14, .columns = 9, .type = DMC_MSCH_CBRD },
		[9] =  { .rows = 15, .columns = 9, .type = DMC_MSCH_CBRD },
		[10] = { .rows = 16, .columns = 9, .type = DMC_MSCH_CBRD },
		/*
		 * 11 through 13 are C-R-B-D patterns. These are
		 * matched for an exact number of columns and to
		 * ensure that the hardware uses at least as many rows
		 * as the pattern requires (i.e. we make sure that
		 * there's no gaps up until we hit the device/chip-select;
		 * however, these patterns can accept up to 16 rows,
		 * as the row-address continues right after the CS
		 * switching)
		 */
		[11] = { .rows = 15, .columns = 10, .type = DMC_MSCH_CRBD },
		[12] = { .rows = 14, .columns = 11, .type = DMC_MSCH_CRBD },
		[13] = { .rows = 13, .columns = 10, .type = DMC_MSCH_CRBD },
		/*
		 * 14 and 15 are catch-all variants using a C-B-D-R
		 * scheme (i.e. alternating the chip-select every time
		 * C-B overflows) and stuffing the remaining C-bits
		 * into the top. Matching needs to make sure that the
		 * number of columns is either an exact match (i.e. we
		 * can use less than the maximum number of rows) -or-
		 * that the columns exceed what is given in this table
		 * and the rows are an exact match (in which case the
		 * remaining C-bits will be stuffed onto the top after
		 * the device/chip-select switches).
		 */
		[14] = { .rows = 16, .columns = 10, .type = DMC_MSCH_CBDR },
		[15] = { .rows = 16, .columns = 9, .type = DMC_MSCH_CBDR },
	};

	/*
	 * For C-B-R-D, we need an exact match (i.e. both for the number of
	 * columns and rows), while for C-B-D-R, only the number of
	 * columns needs to match.
	 */
	for (i = 0; i < ARRAY_SIZE(ddrconf_table); i++) {
		bool match = false;

		/* If this entry if for a different matcher, then skip it */
		if (ddrconf_table[i].type != params->memory_schedule)
			continue;

		/*
		 * Match according to the rules (exact/inexact/at-least)
		 * documented in the ddrconf_table above.
		 */
		switch (params->memory_schedule) {
		case DMC_MSCH_CBRD:
			match = (ddrconf_table[i].columns == cols) &&
				(ddrconf_table[i].rows == rows);
			break;

		case DMC_MSCH_CRBD:
			match = (ddrconf_table[i].columns == cols) &&
				(ddrconf_table[i].rows <= rows);
			break;

		case DMC_MSCH_CBDR:
			match = (ddrconf_table[i].columns == cols) ||
				((ddrconf_table[i].columns <= cols) &&
				 (ddrconf_table[i].rows == rows));
			break;

		default:
			break;
		}

		if (match) {
			debug("%s: setting ddrconf 0x%x\n", __func__, i);
			writel(i, &msch->ddrconf);
			return 0;
		}
	}

	pr_err("%s: ddrconf (NIU config) not found\n", __func__);
	return -EINVAL;
}
/*
 * Encode the detected DRAM geometry into PMU GRF os_reg[2] (the
 * SYS_REG layout shared with later boot-stages and the OS, from
 * which the memory size is derived).
 */
static void dram_all_config(struct udevice *dev)
{
	struct dram_info *priv = dev_get_priv(dev);
	struct rk3368_pmu_grf *pmugrf = priv->pmugrf;
	struct rk3368_sdram_params *params = dev_get_platdata(dev);
	const struct rk3288_sdram_channel *info = &params->chan;
	u32 sys_reg = 0;
	const int chan = 0;	/* the RK3368 DMC has a single channel */

	sys_reg |= DDR3 << SYS_REG_DDRTYPE_SHIFT;
	/* NOTE(review): presumably encodes (num_channels - 1) -- confirm */
	sys_reg |= 0 << SYS_REG_NUM_CH_SHIFT;

	sys_reg |= info->row_3_4 << SYS_REG_ROW_3_4_SHIFT(chan);
	sys_reg |= 1 << SYS_REG_CHINFO_SHIFT(chan);
	sys_reg |= (info->rank - 1) << SYS_REG_RANK_SHIFT(chan);
	sys_reg |= (info->col - 9) << SYS_REG_COL_SHIFT(chan);
	/* equivalent to (bk == 3 ? 0 : 1) << shift, since 0 << shift == 0 */
	sys_reg |= info->bk == 3 ? 0 : 1 << SYS_REG_BK_SHIFT(chan);
	sys_reg |= (info->cs0_row - 13) << SYS_REG_CS0_ROW_SHIFT(chan);
	sys_reg |= (info->cs1_row - 13) << SYS_REG_CS1_ROW_SHIFT(chan);
	sys_reg |= (2 >> info->bw) << SYS_REG_BW_SHIFT(chan);
	sys_reg |= (2 >> info->dbw) << SYS_REG_DBW_SHIFT(chan);

	writel(sys_reg, &pmugrf->os_reg[2]);
}
  677. static int setup_sdram(struct udevice *dev)
  678. {
  679. struct dram_info *priv = dev_get_priv(dev);
  680. struct rk3368_sdram_params *params = dev_get_platdata(dev);
  681. struct rk3368_ddr_pctl *pctl = priv->pctl;
  682. struct rk3368_ddrphy *ddrphy = priv->phy;
  683. struct rk3368_cru *cru = priv->cru;
  684. struct rk3368_grf *grf = priv->grf;
  685. struct rk3368_msch *msch = priv->msch;
  686. int ret;
  687. /* The input clock (i.e. DPLL) needs to be 2x the DRAM frequency */
  688. ret = clk_set_rate(&priv->ddr_clk, 2 * params->ddr_freq);
  689. if (ret < 0) {
  690. debug("%s: could not set DDR clock: %d\n", __func__, ret);
  691. return ret;
  692. }
  693. /* Update the read-latency for the RK3368 */
  694. writel(0x32, &msch->readlatency);
  695. /* Initialise the DDR PCTL and DDR PHY */
  696. ddrctl_reset(cru);
  697. ddrphy_reset(ddrphy);
  698. ddrphy_config_delays(ddrphy, params->ddr_freq);
  699. dfi_cfg(pctl);
  700. /* Configure relative system information of grf_ddrc0_con0 register */
  701. ddr_set_ddr3_mode(grf, true);
  702. ddr_set_noc_spr_err_stall(grf, true);
  703. /* Calculate timings */
  704. pctl_calc_timings(params, params->ddr_freq);
  705. /* Initialise the device timings in protocol controller */
  706. pctl_cfg(pctl, params, grf);
  707. /* Configure AL, CL ... information of PHY registers */
  708. ddrphy_config(ddrphy,
  709. params->pctl_timing.tcl,
  710. params->pctl_timing.tal,
  711. params->pctl_timing.tcwl);
  712. /* Initialize DRAM and configure with mode-register values */
  713. ret = memory_init(pctl, params);
  714. if (ret)
  715. goto error;
  716. move_to_config_state(pctl);
  717. /* Perform data-training */
  718. ddrphy_data_training(pctl, ddrphy);
  719. move_to_access_state(pctl);
  720. /* TODO(prt): could detect rank in training... */
  721. params->chan.rank = 2;
  722. /* TODO(prt): bus width is not auto-detected (yet)... */
  723. params->chan.bw = 2; /* 32bit wide bus */
  724. params->chan.dbw = params->chan.dbw; /* 32bit wide bus */
  725. /* DDR3 is always 8 bank */
  726. params->chan.bk = 3;
  727. /* Detect col and row number */
  728. ret = sdram_col_row_detect(dev);
  729. if (ret)
  730. goto error;
  731. /* Configure NIU DDR configuration */
  732. ret = msch_niu_config(msch, params);
  733. if (ret)
  734. goto error;
  735. /* set up OS_REG to communicate w/ next stage and OS */
  736. dram_all_config(dev);
  737. return 0;
  738. error:
  739. printf("DRAM init failed!\n");
  740. hang();
  741. }
  742. #endif
/*
 * ofdata_to_platdata hook: with a live devicetree, create a regmap
 * over the device's register ranges.  With OF_PLATDATA the regmap
 * cannot be built from the devicetree and this returns 0 without
 * doing anything.
 */
static int rk3368_dmc_ofdata_to_platdata(struct udevice *dev)
{
	int ret = 0;

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rk3368_sdram_params *plat = dev_get_platdata(dev);

	ret = regmap_init_mem(dev_ofnode(dev), &plat->map);
	if (ret)
		return ret;
#endif

	return ret;
}
  754. #if CONFIG_IS_ENABLED(OF_PLATDATA)
  755. static int conv_of_platdata(struct udevice *dev)
  756. {
  757. struct rk3368_sdram_params *plat = dev_get_platdata(dev);
  758. struct dtd_rockchip_rk3368_dmc *of_plat = &plat->of_plat;
  759. plat->ddr_freq = of_plat->rockchip_ddr_frequency;
  760. plat->ddr_speed_bin = of_plat->rockchip_ddr_speed_bin;
  761. plat->memory_schedule = of_plat->rockchip_memory_schedule;
  762. return 0;
  763. }
  764. #endif
/*
 * Probe: resolve the syscon/register blocks, and -- in the TPL build
 * only -- acquire the DDR clock and perform the full SDRAM setup.
 * In all builds, the usable RAM size is read back from PMU GRF
 * os_reg[2] and clamped below the SoC register space.
 *
 * NOTE(review): 'ret' is declared only under CONFIG_TPL_BUILD but used
 * in the OF_PLATDATA section -- this compiles only as long as
 * OF_PLATDATA implies a TPL build; confirm the Kconfig coupling.
 */
static int rk3368_dmc_probe(struct udevice *dev)
{
#ifdef CONFIG_TPL_BUILD
	struct rk3368_sdram_params *plat = dev_get_platdata(dev);
	struct rk3368_ddr_pctl *pctl;
	struct rk3368_ddrphy *ddrphy;
	struct rk3368_cru *cru;
	struct rk3368_grf *grf;
	struct rk3368_msch *msch;
	int ret;
	struct udevice *dev_clk;
#endif
	struct dram_info *priv = dev_get_priv(dev);

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	ret = conv_of_platdata(dev);
	if (ret)
		return ret;
#endif

	priv->pmugrf = syscon_get_first_range(ROCKCHIP_SYSCON_PMUGRF);
	debug("%s: pmugrf=%p\n", __func__, priv->pmugrf);

#ifdef CONFIG_TPL_BUILD
	/* PCTL and PHY base addresses come from the (generated) platdata */
	pctl = (struct rk3368_ddr_pctl *)plat->of_plat.reg[0];
	ddrphy = (struct rk3368_ddrphy *)plat->of_plat.reg[2];
	msch = syscon_get_first_range(ROCKCHIP_SYSCON_MSCH);
	grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF);

	priv->pctl = pctl;
	priv->phy = ddrphy;
	priv->msch = msch;
	priv->grf = grf;

	/* Acquire the DDR clock from the clock driver */
	ret = rockchip_get_clk(&dev_clk);
	if (ret)
		return ret;
	priv->ddr_clk.id = CLK_DDR;
	ret = clk_request(dev_clk, &priv->ddr_clk);
	if (ret)
		return ret;

	cru = rockchip_get_cru();
	priv->cru = cru;
	if (IS_ERR(priv->cru))
		return PTR_ERR(priv->cru);

	ret = setup_sdram(dev);
	if (ret)
		return ret;
#endif

	/* Derive the RAM size from the OS_REG written by dram_all_config() */
	priv->info.base = 0;
	priv->info.size =
		rockchip_sdram_size((phys_addr_t)&priv->pmugrf->os_reg[2]);

	/*
	 * we use the 0x00000000~0xfdffffff space since 0xff000000~0xffffffff
	 * is SoC register space (i.e. reserved), and 0xfe000000~0xfeffffff is
	 * inaccessible for some IP controller.
	 */
	priv->info.size = min(priv->info.size, (size_t)0xfe000000);

	return 0;
}
  820. static int rk3368_dmc_get_info(struct udevice *dev, struct ram_info *info)
  821. {
  822. struct dram_info *priv = dev_get_priv(dev);
  823. *info = priv->info;
  824. return 0;
  825. }
/* RAM uclass operations: only info retrieval is implemented */
static struct ram_ops rk3368_dmc_ops = {
	.get_info = rk3368_dmc_get_info,
};
/* Devicetree match table */
static const struct udevice_id rk3368_dmc_ids[] = {
	{ .compatible = "rockchip,rk3368-dmc" },
	{ }
};
  833. U_BOOT_DRIVER(dmc_rk3368) = {
  834. .name = "rockchip_rk3368_dmc",
  835. .id = UCLASS_RAM,
  836. .of_match = rk3368_dmc_ids,
  837. .ops = &rk3368_dmc_ops,
  838. .probe = rk3368_dmc_probe,
  839. .priv_auto_alloc_size = sizeof(struct dram_info),
  840. .ofdata_to_platdata = rk3368_dmc_ofdata_to_platdata,
  841. .probe = rk3368_dmc_probe,
  842. .priv_auto_alloc_size = sizeof(struct dram_info),
  843. .platdata_auto_alloc_size = sizeof(struct rk3368_sdram_params),
  844. };