umc-pxs2.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2015-2017 Socionext Inc.
  4. * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  5. *
  6. * based on commit 21b6e480f92ccc38fe0502e3116411d6509d3bf2 of Diag by:
  7. * Copyright (C) 2015 Socionext Inc.
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/errno.h>
  11. #include <linux/io.h>
  12. #include <linux/printk.h>
  13. #include <linux/sizes.h>
  14. #include <asm/processor.h>
  15. #include <time.h>
  16. #include "../init.h"
  17. #include "../soc-info.h"
  18. #include "ddrmphy-regs.h"
  19. #include "umc-regs.h"
/* number of DRAM channels on this SoC */
#define DRAM_CH_NR 3

/* supported DRAM operating frequencies, used to index the parameter tables */
enum dram_freq {
	DRAM_FREQ_1866M,
	DRAM_FREQ_2133M,
	DRAM_FREQ_NR,
};

/* supported DRAM sizes (per 16-bit unit), used to index umc_spcctla[][] */
enum dram_size {
	DRAM_SZ_256M,
	DRAM_SZ_512M,
	DRAM_SZ_NR,
};
/* PHY */
/* per-frequency PHY parameter tables, indexed by enum dram_freq */
static u32 ddrphy_pgcr2[DRAM_FREQ_NR] = {0x00FC7E5D, 0x00FC90AB};
static u32 ddrphy_ptr0[DRAM_FREQ_NR] = {0x0EA09205, 0x10C0A6C6};
static u32 ddrphy_ptr1[DRAM_FREQ_NR] = {0x0DAC041B, 0x0FA104B1};
static u32 ddrphy_ptr3[DRAM_FREQ_NR] = {0x15171e45, 0x18182357};
static u32 ddrphy_ptr4[DRAM_FREQ_NR] = {0x0e9ad8e9, 0x10b34157};
static u32 ddrphy_dtpr0[DRAM_FREQ_NR] = {0x35a00d88, 0x39e40e88};
static u32 ddrphy_dtpr1[DRAM_FREQ_NR] = {0x2288cc2c, 0x228a04d0};
static u32 ddrphy_dtpr2[DRAM_FREQ_NR] = {0x50005e00, 0x50006a00};
static u32 ddrphy_dtpr3[DRAM_FREQ_NR] = {0x0010cb49, 0x0010ec89};
static u32 ddrphy_mr0[DRAM_FREQ_NR] = {0x00000115, 0x00000125};
static u32 ddrphy_mr2[DRAM_FREQ_NR] = {0x000002a0, 0x000002a8};
/* dependent on package and board design; indexed by channel */
static u32 ddrphy_acbdlr0[DRAM_CH_NR] = {0x0000000c, 0x0000000c, 0x00000009};
  45. /* DDR multiPHY */
  46. static inline int ddrphy_get_rank(int dx)
  47. {
  48. return dx / 2;
  49. }
  50. static void ddrphy_fifo_reset(void __iomem *phy_base)
  51. {
  52. u32 tmp;
  53. tmp = readl(phy_base + MPHY_PGCR0);
  54. tmp &= ~MPHY_PGCR0_PHYFRST;
  55. writel(tmp, phy_base + MPHY_PGCR0);
  56. udelay(1);
  57. tmp |= MPHY_PGCR0_PHYFRST;
  58. writel(tmp, phy_base + MPHY_PGCR0);
  59. udelay(1);
  60. }
  61. static void ddrphy_vt_ctrl(void __iomem *phy_base, int enable)
  62. {
  63. u32 tmp;
  64. tmp = readl(phy_base + MPHY_PGCR1);
  65. if (enable)
  66. tmp &= ~MPHY_PGCR1_INHVT;
  67. else
  68. tmp |= MPHY_PGCR1_INHVT;
  69. writel(tmp, phy_base + MPHY_PGCR1);
  70. if (!enable) {
  71. while (!(readl(phy_base + MPHY_PGSR1) & MPHY_PGSR1_VTSTOP))
  72. cpu_relax();
  73. }
  74. }
  75. static void ddrphy_dqs_delay_fixup(void __iomem *phy_base, int nr_dx, int step)
  76. {
  77. int dx;
  78. u32 lcdlr1, rdqsd;
  79. void __iomem *dx_base = phy_base + MPHY_DX_BASE;
  80. ddrphy_vt_ctrl(phy_base, 0);
  81. for (dx = 0; dx < nr_dx; dx++) {
  82. lcdlr1 = readl(dx_base + MPHY_DX_LCDLR1);
  83. rdqsd = (lcdlr1 >> 8) & 0xff;
  84. rdqsd = clamp(rdqsd + step, 0U, 0xffU);
  85. lcdlr1 = (lcdlr1 & ~(0xff << 8)) | (rdqsd << 8);
  86. writel(lcdlr1, dx_base + MPHY_DX_LCDLR1);
  87. readl(dx_base + MPHY_DX_LCDLR1); /* relax */
  88. dx_base += MPHY_DX_STRIDE;
  89. }
  90. ddrphy_vt_ctrl(phy_base, 1);
  91. }
  92. static int ddrphy_get_system_latency(void __iomem *phy_base, int width)
  93. {
  94. void __iomem *dx_base = phy_base + MPHY_DX_BASE;
  95. const int nr_dx = width / 8;
  96. int dx, rank;
  97. u32 gtr;
  98. int dgsl, dgsl_min = INT_MAX, dgsl_max = 0;
  99. for (dx = 0; dx < nr_dx; dx++) {
  100. gtr = readl(dx_base + MPHY_DX_GTR);
  101. for (rank = 0; rank < 4; rank++) {
  102. dgsl = gtr & 0x7;
  103. /* if dgsl is zero, this rank was not trained. skip. */
  104. if (dgsl) {
  105. dgsl_min = min(dgsl_min, dgsl);
  106. dgsl_max = max(dgsl_max, dgsl);
  107. }
  108. gtr >>= 3;
  109. }
  110. dx_base += MPHY_DX_STRIDE;
  111. }
  112. if (dgsl_min != dgsl_max)
  113. pr_warn("DQS Gateing System Latencies are not all leveled.\n");
  114. return dgsl_max;
  115. }
/*
 * Program the static DDR multiPHY configuration for one channel and
 * start the PHY initialization engine.
 *
 * @phy_base: PHY register base
 * @freq:     DRAM frequency selector (index into the parameter tables)
 * @width:    DRAM bus width in bits (8 bits per DATX8 byte lane)
 * @ch:       channel number (selects the board-dependent AC delay)
 *
 * ZQ calibration is bypassed here; it is run separately afterwards by
 * ddrphy_impedance_calibration().
 */
static void ddrphy_init(void __iomem *phy_base, enum dram_freq freq, int width,
			int ch)
{
	u32 tmp;
	void __iomem *zq_base, *dx_base;
	int zq, dx;
	int nr_dx;

	nr_dx = width / 8;	/* number of active byte lanes */

	/* skip ZQ calibration for now; it is triggered later on its own */
	writel(MPHY_PIR_ZCALBYP, phy_base + MPHY_PIR);
	/*
	 * Disable RGLVT bit (Read DQS Gating LCDL Delay VT Compensation)
	 * to avoid read error issue.
	 */
	writel(0x07d81e37, phy_base + MPHY_PGCR0);
	writel(0x0200c4e0, phy_base + MPHY_PGCR1);

	tmp = ddrphy_pgcr2[freq];
	if (width >= 32)
		/* 32-bit bus: enable dual channel and AC power-down */
		tmp |= MPHY_PGCR2_DUALCHN | MPHY_PGCR2_ACPDDC;
	writel(tmp, phy_base + MPHY_PGCR2);

	/* PHY timing registers (frequency-dependent except PTR2) */
	writel(ddrphy_ptr0[freq], phy_base + MPHY_PTR0);
	writel(ddrphy_ptr1[freq], phy_base + MPHY_PTR1);
	writel(0x00083def, phy_base + MPHY_PTR2);
	writel(ddrphy_ptr3[freq], phy_base + MPHY_PTR3);
	writel(ddrphy_ptr4[freq], phy_base + MPHY_PTR4);

	/* AC delay is package/board dependent, hence indexed by channel */
	writel(ddrphy_acbdlr0[ch], phy_base + MPHY_ACBDLR0);

	/* AC and DX I/O configuration */
	writel(0x55555555, phy_base + MPHY_ACIOCR1);
	writel(0x00000000, phy_base + MPHY_ACIOCR2);
	writel(0x55555555, phy_base + MPHY_ACIOCR3);
	writel(0x00000000, phy_base + MPHY_ACIOCR4);
	writel(0x00000055, phy_base + MPHY_ACIOCR5);
	writel(0x00181aa4, phy_base + MPHY_DXCCR);

	writel(0x0024641e, phy_base + MPHY_DSGCR);
	writel(0x0000040b, phy_base + MPHY_DCR);

	/* DRAM timing parameters */
	writel(ddrphy_dtpr0[freq], phy_base + MPHY_DTPR0);
	writel(ddrphy_dtpr1[freq], phy_base + MPHY_DTPR1);
	writel(ddrphy_dtpr2[freq], phy_base + MPHY_DTPR2);
	writel(ddrphy_dtpr3[freq], phy_base + MPHY_DTPR3);

	/* DRAM mode registers */
	writel(ddrphy_mr0[freq], phy_base + MPHY_MR0);
	writel(0x00000006, phy_base + MPHY_MR1);
	writel(ddrphy_mr2[freq], phy_base + MPHY_MR2);
	writel(0x00000000, phy_base + MPHY_MR3);

	/* enable data training for the ranks covered by the active lanes */
	tmp = 0;
	for (dx = 0; dx < nr_dx; dx++)
		tmp |= BIT(MPHY_DTCR_RANKEN_SHIFT + ddrphy_get_rank(dx));
	writel(0x90003087 | tmp, phy_base + MPHY_DTCR);

	/* data training addresses and data patterns */
	writel(0x00000000, phy_base + MPHY_DTAR0);
	writel(0x00000008, phy_base + MPHY_DTAR1);
	writel(0x00000010, phy_base + MPHY_DTAR2);
	writel(0x00000018, phy_base + MPHY_DTAR3);
	writel(0xdd22ee11, phy_base + MPHY_DTDR0);
	writel(0x7788bb44, phy_base + MPHY_DTDR1);

	/* impedance control settings */
	writel(0x04048900, phy_base + MPHY_ZQCR);

	zq_base = phy_base + MPHY_ZQ_BASE;
	for (zq = 0; zq < 4; zq++) {
		/*
		 * board-dependent
		 * PXS2: CH0ZQ0=0x5B, CH1ZQ0=0x5B, CH2ZQ0=0x59, others=0x5D
		 */
		writel(0x0007BB5D, zq_base + MPHY_ZQ_PR);
		zq_base += MPHY_ZQ_STRIDE;
	}

	/* DATX8 settings */
	dx_base = phy_base + MPHY_DX_BASE;
	for (dx = 0; dx < 4; dx++) {
		tmp = readl(dx_base + MPHY_DX_GCR0);
		/* enable write leveling only for this lane's own rank */
		tmp &= ~MPHY_DX_GCR0_WLRKEN_MASK;
		tmp |= BIT(MPHY_DX_GCR0_WLRKEN_SHIFT + ddrphy_get_rank(dx)) &
						MPHY_DX_GCR0_WLRKEN_MASK;
		writel(tmp, dx_base + MPHY_DX_GCR0);

		writel(0x00000000, dx_base + MPHY_DX_GCR1);
		writel(0x00000000, dx_base + MPHY_DX_GCR2);
		writel(0x00000000, dx_base + MPHY_DX_GCR3);
		dx_base += MPHY_DX_STRIDE;
	}

	/* wait for the PHY initialization started above to finish */
	while (!(readl(phy_base + MPHY_PGSR0) & MPHY_PGSR0_IDONE))
		cpu_relax();

	ddrphy_dqs_delay_fixup(phy_base, nr_dx, -4);
}
/*
 * One step of a PHY init/training run: the trigger bit(s) written to the
 * PIR register and the status bits checked in PGSR0 afterwards.
 */
struct ddrphy_init_sequence {
	char *description;	/* human-readable name for error messages */
	u32 init_flag;		/* PIR bit(s) that start this step */
	u32 done_flag;		/* PGSR0 bit that signals completion */
	u32 err_flag;		/* PGSR0 bit that signals failure (0 if none) */
};
/* ZQ impedance calibration run; terminated by an empty sentinel entry */
static const struct ddrphy_init_sequence impedance_calibration_sequence[] = {
	{
		"Impedance Calibration",
		MPHY_PIR_ZCAL,
		MPHY_PGSR0_ZCDONE,
		MPHY_PGSR0_ZCERR,
	},
	{ /* sentinel */ }
};
/* DRAM reset + initialization; PGSR0 has no dedicated error flag for it */
static const struct ddrphy_init_sequence dram_init_sequence[] = {
	{
		"DRAM Initialization",
		MPHY_PIR_DRAMRST | MPHY_PIR_DRAMINIT,
		MPHY_PGSR0_DIDONE,
		0,
	},
	{ /* sentinel */ }
};
/*
 * Full data training run; all steps are OR'ed into a single PIR trigger
 * by __ddrphy_training() and their error flags checked individually.
 */
static const struct ddrphy_init_sequence training_sequence[] = {
	{
		"Write Leveling",
		MPHY_PIR_WL,
		MPHY_PGSR0_WLDONE,
		MPHY_PGSR0_WLERR,
	},
	{
		"Read DQS Gate Training",
		MPHY_PIR_QSGATE,
		MPHY_PGSR0_QSGDONE,
		MPHY_PGSR0_QSGERR,
	},
	{
		"Write Leveling Adjustment",
		MPHY_PIR_WLADJ,
		MPHY_PGSR0_WLADONE,
		MPHY_PGSR0_WLAERR,
	},
	{
		"Read Bit Deskew",
		MPHY_PIR_RDDSKW,
		MPHY_PGSR0_RDDONE,
		MPHY_PGSR0_RDERR,
	},
	{
		"Write Bit Deskew",
		MPHY_PIR_WRDSKW,
		MPHY_PGSR0_WDDONE,
		MPHY_PGSR0_WDERR,
	},
	{
		"Read Eye Training",
		MPHY_PIR_RDEYE,
		MPHY_PGSR0_REDONE,
		MPHY_PGSR0_REERR,
	},
	{
		"Write Eye Training",
		MPHY_PIR_WREYE,
		MPHY_PGSR0_WEDONE,
		MPHY_PGSR0_WEERR,
	},
	{ /* sentinel */ }
};
/*
 * Trigger a set of PHY operations and wait for their completion.
 *
 * @phy_base: PHY register base
 * @seq:      array of steps, terminated by an entry with a NULL description
 *
 * All requested steps are merged into one PIR write (together with
 * MPHY_PIR_INIT); completion is polled in PGSR0 until every done flag
 * plus IDONE is set, then each step's error flag is checked.
 *
 * Return: 0 on success, -ETIMEDOUT if the done flags never all set,
 * -EIO if any step's error flag is raised.
 */
static int __ddrphy_training(void __iomem *phy_base,
			     const struct ddrphy_init_sequence *seq)
{
	const struct ddrphy_init_sequence *s;
	u32 pgsr0;
	u32 init_flag = MPHY_PIR_INIT;
	u32 done_flag = MPHY_PGSR0_IDONE;
	int timeout = 50000; /* 50 msec is long enough */
	unsigned long start = 0;

#ifdef DEBUG
	start = get_timer(0);
#endif

	/* merge all requested steps into a single trigger/done mask */
	for (s = seq; s->description; s++) {
		init_flag |= s->init_flag;
		done_flag |= s->done_flag;
	}

	writel(init_flag, phy_base + MPHY_PIR);

	do {
		if (--timeout < 0) {
			pr_err("%s: error: timeout during DDR training\n",
			       __func__);
			return -ETIMEDOUT;
		}
		udelay(1);
		pgsr0 = readl(phy_base + MPHY_PGSR0);
	} while ((pgsr0 & done_flag) != done_flag);

	/* all steps completed; now check the per-step error flags */
	for (s = seq; s->description; s++) {
		if (pgsr0 & s->err_flag) {
			pr_err("%s: error: %s failed\n", __func__,
			       s->description);
			return -EIO;
		}
	}

	pr_debug("DDRPHY training: elapsed time %ld msec\n", get_timer(start));

	return 0;
}
/*
 * Run the ZQ impedance calibration sequence, then latch the calibration
 * result into the I/O cells and enable the averaging algorithm.
 *
 * Return: 0 on success, negative error code from __ddrphy_training().
 */
static int ddrphy_impedance_calibration(void __iomem *phy_base)
{
	int ret;
	u32 tmp;

	ret = __ddrphy_training(phy_base, impedance_calibration_sequence);
	if (ret)
		return ret;

	/*
	 * Because of a hardware bug, IDONE flag is set when the first ZQ block
	 * is calibrated. The flag does not guarantee the completion for all
	 * the ZQ blocks. Wait a little more just in case.
	 */
	udelay(1);

	/* reflect ZQ settings and enable average algorithm */
	tmp = readl(phy_base + MPHY_ZQCR);
	tmp |= MPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	writel(tmp, phy_base + MPHY_ZQCR);
	/* deassert the forced update, then turn on averaging */
	tmp &= ~MPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	tmp |= MPHY_ZQCR_AVGEN;
	writel(tmp, phy_base + MPHY_ZQCR);

	return 0;
}
  322. static int ddrphy_dram_init(void __iomem *phy_base)
  323. {
  324. return __ddrphy_training(phy_base, dram_init_sequence);
  325. }
  326. static int ddrphy_training(void __iomem *phy_base)
  327. {
  328. return __ddrphy_training(phy_base, training_sequence);
  329. }
/* UMC */
/* per-frequency UMC parameter tables, indexed by enum dram_freq */
static u32 umc_cmdctla[DRAM_FREQ_NR] = {0x66DD131D, 0x77EE1722};
/*
 * The ch2 is a different generation UMC core.
 * The register spec is different, unfortunately.
 */
static u32 umc_cmdctlb_ch01[DRAM_FREQ_NR] = {0x13E87C44, 0x18F88C44};
static u32 umc_cmdctlb_ch2[DRAM_FREQ_NR] = {0x19E8DC44, 0x1EF8EC44};
/* indexed by [frequency][size per 16-bit unit] */
static u32 umc_spcctla[DRAM_FREQ_NR][DRAM_SZ_NR] = {
	{0x004A071D, 0x0078071D},
	{0x0055081E, 0x0089081E},
};
static u32 umc_spcctlb[] = {0x00FF000A, 0x00FF000B};
/* The ch2 is different for some reason only hardware guys know... */
static u32 umc_flowctla_ch01[] = {0x0800001E, 0x08000022};
static u32 umc_flowctla_ch2[] = {0x0800001E, 0x0800001E};
/*
 * Add the PHY system latency on top of the UMC's currently-programmed
 * read-data latency and write the combined value back.
 *
 * @dc_base:     DRAM controller register base
 * @phy_latency: system latency reported by ddrphy_get_system_latency()
 *
 * The total latency is split across two fields: RADLTY holds up to 0xf
 * and any excess spills into RAD2LTY.
 */
static void umc_set_system_latency(void __iomem *dc_base, int phy_latency)
{
	u32 val;
	int latency;

	/* start from the latency currently programmed in RDATACTL_D0 */
	val = readl(dc_base + UMC_RDATACTL_D0);
	latency = (val & UMC_RDATACTL_RADLTY_MASK) >> UMC_RDATACTL_RADLTY_SHIFT;
	latency += (val & UMC_RDATACTL_RAD2LTY_MASK) >>
						UMC_RDATACTL_RAD2LTY_SHIFT;
	/*
	 * UMC works at the half clock rate of the PHY.
	 * The LSB of latency is ignored
	 */
	latency += phy_latency & ~1;

	val &= ~(UMC_RDATACTL_RADLTY_MASK | UMC_RDATACTL_RAD2LTY_MASK);
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, dc_base + UMC_RDATACTL_D0);
	writel(val, dc_base + UMC_RDATACTL_D1);

	readl(dc_base + UMC_RDATACTL_D1); /* relax */
}
  370. /* enable/disable auto refresh */
  371. static void umc_refresh_ctrl(void __iomem *dc_base, int enable)
  372. {
  373. u32 tmp;
  374. tmp = readl(dc_base + UMC_SPCSETB);
  375. tmp &= ~UMC_SPCSETB_AREFMD_MASK;
  376. if (enable)
  377. tmp |= UMC_SPCSETB_AREFMD_ARB;
  378. else
  379. tmp |= UMC_SPCSETB_AREFMD_REG;
  380. writel(tmp, dc_base + UMC_SPCSETB);
  381. udelay(1);
  382. }
  383. static void umc_ud_init(void __iomem *umc_base, int ch)
  384. {
  385. writel(0x00000003, umc_base + UMC_BITPERPIXELMODE_D0);
  386. if (ch == 2)
  387. writel(0x00000033, umc_base + UMC_PAIR1DOFF_D0);
  388. }
/*
 * Program the DRAM controller timing and flow-control registers for one
 * channel.
 *
 * @dc_base: DRAM controller register base
 * @freq:    DRAM frequency selector
 * @size:    DRAM size per 16-bit unit (0 means unpopulated; nothing done)
 * @width:   DRAM bus width in bits
 * @ch:      channel number (ch2 is a different-generation UMC core)
 *
 * Return: 0 on success or when @size is zero, -EINVAL for an
 * unsupported @size.
 */
static int umc_dc_init(void __iomem *dc_base, enum dram_freq freq,
		       unsigned long size, int width, int ch)
{
	enum dram_size size_e;
	int latency;
	u32 val;

	switch (size) {
	case 0:
		/* unpopulated channel: nothing to program */
		return 0;
	case SZ_256M:
		size_e = DRAM_SZ_256M;
		break;
	case SZ_512M:
		size_e = DRAM_SZ_512M;
		break;
	default:
		pr_err("unsupported DRAM size 0x%08lx (per 16bit) for ch%d\n",
		       size, ch);
		return -EINVAL;
	}

	writel(umc_cmdctla[freq], dc_base + UMC_CMDCTLA);
	/* ch2 uses its own CMDCTLB table (different UMC core generation) */
	writel(ch == 2 ? umc_cmdctlb_ch2[freq] : umc_cmdctlb_ch01[freq],
	       dc_base + UMC_CMDCTLB);
	writel(umc_spcctla[freq][size_e], dc_base + UMC_SPCCTLA);
	writel(umc_spcctlb[freq], dc_base + UMC_SPCCTLB);

	val = 0x000e000e;
	latency = 12;
	/* ES2 inserted one more FF to the logic. */
	if (uniphier_get_soc_model() >= 2)
		latency += 2;
	/* RADLTY holds at most 0xf; the excess spills into RAD2LTY */
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, dc_base + UMC_RDATACTL_D0);
	if (width >= 32)
		/* second 16-bit data slice is in use */
		writel(val, dc_base + UMC_RDATACTL_D1);

	writel(0x04060A02, dc_base + UMC_WDATACTL_D0);
	if (width >= 32)
		writel(0x04060A02, dc_base + UMC_WDATACTL_D1);
	writel(0x04000000, dc_base + UMC_DATASET);
	writel(0x00400020, dc_base + UMC_DCCGCTL);
	writel(0x00000084, dc_base + UMC_FLOWCTLG);
	writel(0x00000000, dc_base + UMC_ACSSETA);

	writel(ch == 2 ? umc_flowctla_ch2[freq] : umc_flowctla_ch01[freq],
	       dc_base + UMC_FLOWCTLA);
	writel(0x00004400, dc_base + UMC_FLOWCTLC);
	writel(0x200A0A00, dc_base + UMC_SPCSETB);
	writel(0x00000520, dc_base + UMC_DFICUPDCTLA);
	writel(0x0000000D, dc_base + UMC_RESPCTL);

	/* flow control / BSI mapping differ between the two core variants */
	if (ch != 2) {
		writel(0x00202000, dc_base + UMC_FLOWCTLB);
		writel(0xFDBFFFFF, dc_base + UMC_FLOWCTLOB0);
		writel(0xFFFFFFFF, dc_base + UMC_FLOWCTLOB1);
		writel(0x00080700, dc_base + UMC_BSICMAPSET);
	} else {
		writel(0x00200000, dc_base + UMC_FLOWCTLB);
		writel(0x00000000, dc_base + UMC_BSICMAPSET);
	}

	/* unmask all error reporting */
	writel(0x00000000, dc_base + UMC_ERRMASKA);
	writel(0x00000000, dc_base + UMC_ERRMASKB);

	return 0;
}
  454. static int umc_ch_init(void __iomem *umc_ch_base, enum dram_freq freq,
  455. unsigned long size, unsigned int width, int ch)
  456. {
  457. void __iomem *dc_base = umc_ch_base + 0x00011000;
  458. void __iomem *phy_base = umc_ch_base + 0x00030000;
  459. int ret;
  460. writel(0x00000002, dc_base + UMC_INITSET);
  461. while (readl(dc_base + UMC_INITSTAT) & BIT(2))
  462. cpu_relax();
  463. /* deassert PHY reset signals */
  464. writel(UMC_DIOCTLA_CTL_NRST | UMC_DIOCTLA_CFG_NRST,
  465. dc_base + UMC_DIOCTLA);
  466. ddrphy_init(phy_base, freq, width, ch);
  467. ret = ddrphy_impedance_calibration(phy_base);
  468. if (ret)
  469. return ret;
  470. ddrphy_dram_init(phy_base);
  471. if (ret)
  472. return ret;
  473. ret = umc_dc_init(dc_base, freq, size, width, ch);
  474. if (ret)
  475. return ret;
  476. umc_ud_init(umc_ch_base, ch);
  477. ret = ddrphy_training(phy_base);
  478. if (ret)
  479. return ret;
  480. udelay(1);
  481. /* match the system latency between UMC and PHY */
  482. umc_set_system_latency(dc_base,
  483. ddrphy_get_system_latency(phy_base, width));
  484. udelay(1);
  485. /* stop auto refresh before clearing FIFO in PHY */
  486. umc_refresh_ctrl(dc_base, 0);
  487. ddrphy_fifo_reset(phy_base);
  488. umc_refresh_ctrl(dc_base, 1);
  489. udelay(10);
  490. return 0;
  491. }
  492. static void um_init(void __iomem *um_base)
  493. {
  494. writel(0x000000ff, um_base + UMC_MBUS0);
  495. writel(0x000000ff, um_base + UMC_MBUS1);
  496. writel(0x000000ff, um_base + UMC_MBUS2);
  497. writel(0x000000ff, um_base + UMC_MBUS3);
  498. }
/*
 * Entry point: initialize every populated DRAM channel, then the
 * memory bus.
 *
 * @bd: board data providing the DRAM frequency and per-channel
 *      size/width
 *
 * Return: 0 on success, -EINVAL for an unsupported frequency, or the
 * error code of the failing channel initialization.
 */
int uniphier_pxs2_umc_init(const struct uniphier_board_data *bd)
{
	void __iomem *um_base = (void __iomem *)0x5b600000;
	void __iomem *umc_ch_base = (void __iomem *)0x5b800000;
	enum dram_freq freq;
	int ch, ret;

	switch (bd->dram_freq) {
	case 1866:
		freq = DRAM_FREQ_1866M;
		break;
	case 2133:
		freq = DRAM_FREQ_2133M;
		break;
	default:
		pr_err("unsupported DRAM frequency %d MHz\n", bd->dram_freq);
		return -EINVAL;
	}

	for (ch = 0; ch < DRAM_CH_NR; ch++) {
		unsigned long size = bd->dram_ch[ch].size;
		unsigned int width = bd->dram_ch[ch].width;

		if (size) {
			/* umc_ch_init() takes the size per 16-bit unit */
			ret = umc_ch_init(umc_ch_base, freq,
					  size / (width / 16), width, ch);
			if (ret) {
				pr_err("failed to initialize UMC ch%d\n", ch);
				return ret;
			}
		}

		/* each channel's register bank is 2 MB apart */
		umc_ch_base += 0x00200000;
	}

	um_init(um_base);

	return 0;
}