pl35x-nand-controller.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ARM PL35X NAND flash controller driver
 *
 * Copyright (C) 2017 Xilinx, Inc
 * Author:
 *   Miquel Raynal <miquel.raynal@bootlin.com>
 * Original work (rewritten):
 *   Punnaiah Choudary Kalluri <punnaia@xilinx.com>
 *   Naga Sureshkumar Relli <nagasure@xilinx.com>
 */

#include <linux/amba/bus.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>

#define PL35X_NANDC_DRIVER_NAME "pl35x-nand-controller"

/* SMC controller status register (RO) */
#define PL35X_SMC_MEMC_STATUS 0x0
#define PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1 BIT(6)

/* SMC clear config register (WO) */
#define PL35X_SMC_MEMC_CFG_CLR 0xC
#define PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1 BIT(1)
#define PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 BIT(4)
#define PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 BIT(6)

/* SMC direct command register (WO) */
#define PL35X_SMC_DIRECT_CMD 0x10
#define PL35X_SMC_DIRECT_CMD_NAND_CS (0x4 << 23)
#define PL35X_SMC_DIRECT_CMD_UPD_REGS (0x2 << 21)

/* SMC set cycles register (WO) */
#define PL35X_SMC_CYCLES 0x14
#define PL35X_SMC_NAND_TRC_CYCLES(x) ((x) << 0)
#define PL35X_SMC_NAND_TWC_CYCLES(x) ((x) << 4)
#define PL35X_SMC_NAND_TREA_CYCLES(x) ((x) << 8)
#define PL35X_SMC_NAND_TWP_CYCLES(x) ((x) << 11)
#define PL35X_SMC_NAND_TCLR_CYCLES(x) ((x) << 14)
#define PL35X_SMC_NAND_TAR_CYCLES(x) ((x) << 17)
#define PL35X_SMC_NAND_TRR_CYCLES(x) ((x) << 20)

/* SMC set opmode register (WO) */
#define PL35X_SMC_OPMODE 0x18
#define PL35X_SMC_OPMODE_BW_8 0
#define PL35X_SMC_OPMODE_BW_16 1

/* SMC ECC status register (RO) */
#define PL35X_SMC_ECC_STATUS 0x400
#define PL35X_SMC_ECC_STATUS_ECC_BUSY BIT(6)

/* SMC ECC configuration register */
#define PL35X_SMC_ECC_CFG 0x404
#define PL35X_SMC_ECC_CFG_MODE_MASK 0xC
#define PL35X_SMC_ECC_CFG_MODE_BYPASS 0
#define PL35X_SMC_ECC_CFG_MODE_APB BIT(2)
#define PL35X_SMC_ECC_CFG_MODE_MEM BIT(3)
#define PL35X_SMC_ECC_CFG_PGSIZE_MASK 0x3

/* SMC ECC command 1 register */
#define PL35X_SMC_ECC_CMD1 0x408
#define PL35X_SMC_ECC_CMD1_WRITE(x) ((x) << 0)
#define PL35X_SMC_ECC_CMD1_READ(x) ((x) << 8)
#define PL35X_SMC_ECC_CMD1_READ_END(x) ((x) << 16)
#define PL35X_SMC_ECC_CMD1_READ_END_VALID(x) ((x) << 24)

/* SMC ECC command 2 register */
#define PL35X_SMC_ECC_CMD2 0x40C
#define PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(x) ((x) << 0)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG(x) ((x) << 8)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(x) ((x) << 16)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(x) ((x) << 24)

/* SMC ECC value registers (RO) */
#define PL35X_SMC_ECC_VALUE(x) (0x418 + (4 * (x)))
#define PL35X_SMC_ECC_VALUE_IS_CORRECTABLE(x) ((x) & BIT(27))
#define PL35X_SMC_ECC_VALUE_HAS_FAILED(x) ((x) & BIT(28))
#define PL35X_SMC_ECC_VALUE_IS_VALID(x) ((x) & BIT(30))

/* NAND AXI interface */
#define PL35X_SMC_CMD_PHASE 0
#define PL35X_SMC_CMD_PHASE_CMD0(x) ((x) << 3)
#define PL35X_SMC_CMD_PHASE_CMD1(x) ((x) << 11)
#define PL35X_SMC_CMD_PHASE_CMD1_VALID BIT(20)
#define PL35X_SMC_CMD_PHASE_ADDR(pos, x) ((x) << (8 * (pos)))
#define PL35X_SMC_CMD_PHASE_NADDRS(x) ((x) << 21)
#define PL35X_SMC_DATA_PHASE BIT(19)
#define PL35X_SMC_DATA_PHASE_ECC_LAST BIT(10)
#define PL35X_SMC_DATA_PHASE_CLEAR_CS BIT(21)

#define PL35X_NAND_MAX_CS 1
#define PL35X_NAND_LAST_XFER_SZ 4
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, period_ns))
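/*
 * For illustration: assuming a 100 MHz memclk (10 ns period), a tWC_min of
 * 25000 ps converts as TO_CYCLES(25000, 10) = DIV_ROUND_UP(25, 10) = 3
 * controller clock cycles.
 */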
#define PL35X_NAND_ECC_BITS_MASK 0xFFF
#define PL35X_NAND_ECC_BYTE_OFF_MASK 0x1FF
#define PL35X_NAND_ECC_BIT_OFF_MASK 0x7

struct pl35x_nand_timings {
	unsigned int t_rc:4;
	unsigned int t_wc:4;
	unsigned int t_rea:3;
	unsigned int t_wp:3;
	unsigned int t_clr:3;
	unsigned int t_ar:3;
	unsigned int t_rr:4;
	unsigned int rsvd:8;
};
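/*
 * Note that the bitfield widths above mirror the SMC "set cycles" register
 * layout: t_rc at bit 0, t_wc at bit 4, t_rea at bit 8, t_wp at bit 11,
 * t_clr at bit 14, t_ar at bit 17 and t_rr at bit 20, matching the
 * PL35X_SMC_NAND_*_CYCLES() shifts defined above, with 8 reserved bits on
 * top.
 */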
struct pl35x_nand {
	struct list_head node;
	struct nand_chip chip;
	unsigned int cs;
	unsigned int addr_cycles;
	u32 ecc_cfg;
	u32 timings;
};

/**
 * struct pl35x_nandc - NAND flash controller driver structure
 * @dev: Kernel device
 * @conf_regs: SMC configuration registers for command phase
 * @io_regs: NAND data registers for data phase
 * @controller: Core NAND controller structure
 * @chips: List of connected NAND chips
 * @selected_chip: NAND chip currently selected by the controller
 * @assigned_cs: List of assigned CS
 * @ecc_buf: Temporary buffer to extract ECC bytes
 */
struct pl35x_nandc {
	struct device *dev;
	void __iomem *conf_regs;
	void __iomem *io_regs;
	struct nand_controller controller;
	struct list_head chips;
	struct nand_chip *selected_chip;
	unsigned long assigned_cs;
	u8 *ecc_buf;
};

static inline struct pl35x_nandc *to_pl35x_nandc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct pl35x_nandc, controller);
}

static inline struct pl35x_nand *to_pl35x_nand(struct nand_chip *chip)
{
	return container_of(chip, struct pl35x_nand, chip);
}

static int pl35x_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
				     struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * chip->ecc.bytes);
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int pl35x_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section >= chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = (section * chip->ecc.bytes) + 8;
	oobregion->length = 8;

	return 0;
}

static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = {
	.ecc = pl35x_ecc_ooblayout16_ecc,
	.free = pl35x_ecc_ooblayout16_free,
};
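/*
 * As an example of the 16-byte OOB layout above: a 512-byte page has a
 * single 1b/512B ECC step, so section 0 reports ECC bytes at OOB offsets
 * 0-2 and a free region at offsets 8-15; any further section returns
 * -ERANGE.
 */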
/* Generic flash bbt descriptors */
static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = mirror_pattern
};

static void pl35x_smc_update_regs(struct pl35x_nandc *nfc)
{
	writel(PL35X_SMC_DIRECT_CMD_NAND_CS |
	       PL35X_SMC_DIRECT_CMD_UPD_REGS,
	       nfc->conf_regs + PL35X_SMC_DIRECT_CMD);
}

static int pl35x_smc_set_buswidth(struct pl35x_nandc *nfc, unsigned int bw)
{
	if (bw != PL35X_SMC_OPMODE_BW_8 && bw != PL35X_SMC_OPMODE_BW_16)
		return -EINVAL;

	writel(bw, nfc->conf_regs + PL35X_SMC_OPMODE);
	pl35x_smc_update_regs(nfc);

	return 0;
}

static void pl35x_smc_clear_irq(struct pl35x_nandc *nfc)
{
	writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1,
	       nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
}

static int pl35x_smc_wait_for_irq(struct pl35x_nandc *nfc)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_MEMC_STATUS, reg,
				 reg & PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1,
				 10, 1000000);
	if (ret)
		dev_err(nfc->dev,
			"Timeout polling on NAND controller interrupt (0x%x)\n",
			reg);

	pl35x_smc_clear_irq(nfc);

	return ret;
}

static int pl35x_smc_wait_for_ecc_done(struct pl35x_nandc *nfc)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_ECC_STATUS, reg,
				 !(reg & PL35X_SMC_ECC_STATUS_ECC_BUSY),
				 10, 1000000);
	if (ret)
		dev_err(nfc->dev,
			"Timeout polling on ECC controller interrupt\n");

	return ret;
}

static int pl35x_smc_set_ecc_mode(struct pl35x_nandc *nfc,
				  struct nand_chip *chip,
				  unsigned int mode)
{
	struct pl35x_nand *plnand;
	u32 ecc_cfg;

	ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
	ecc_cfg &= ~PL35X_SMC_ECC_CFG_MODE_MASK;
	ecc_cfg |= mode;
	writel(ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);

	if (chip) {
		plnand = to_pl35x_nand(chip);
		plnand->ecc_cfg = ecc_cfg;
	}

	if (mode != PL35X_SMC_ECC_CFG_MODE_BYPASS)
		return pl35x_smc_wait_for_ecc_done(nfc);

	return 0;
}

static void pl35x_smc_force_byte_access(struct nand_chip *chip,
					bool force_8bit)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	int ret;

	if (!(chip->options & NAND_BUSWIDTH_16))
		return;

	if (force_8bit)
		ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
	else
		ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_16);
	if (ret)
		dev_err(nfc->dev, "Error in Buswidth\n");
}

static void pl35x_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);

	if (chip == nfc->selected_chip)
		return;

	/* Setup the timings */
	writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES);
	pl35x_smc_update_regs(nfc);

	/* Configure the ECC engine */
	writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);

	nfc->selected_chip = chip;
}

static void pl35x_nand_read_data_op(struct nand_chip *chip, u8 *in,
				    unsigned int len, bool force_8bit,
				    unsigned int flags, unsigned int last_flags)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	unsigned int buf_end = len / 4;
	unsigned int in_start = round_down(len, 4);
	unsigned int data_phase_addr;
	u32 *buf32 = (u32 *)in;
	u8 *buf8 = (u8 *)in;
	int i;

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, true);

	for (i = 0; i < buf_end; i++) {
		data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
		if (i + 1 == buf_end)
			data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;

		buf32[i] = readl(nfc->io_regs + data_phase_addr);
	}
	/* The extra flags cannot be applied to the trailing unaligned accesses */
	for (i = in_start; i < len; i++)
		buf8[i] = readb(nfc->io_regs + PL35X_SMC_DATA_PHASE);

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, false);
}
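/*
 * For example, reading a 2048-byte page with PL35X_SMC_DATA_PHASE_ECC_LAST
 * as last_flags issues 511 word-wide reads of the data phase register with
 * no extra flags, then one final word-wide read with ECC_LAST set; the
 * trailing byte loop does not run since the length is word-aligned.
 */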
static void pl35x_nand_write_data_op(struct nand_chip *chip, const u8 *out,
				     int len, bool force_8bit,
				     unsigned int flags,
				     unsigned int last_flags)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	unsigned int buf_end = len / 4;
	unsigned int in_start = round_down(len, 4);
	const u32 *buf32 = (const u32 *)out;
	const u8 *buf8 = (const u8 *)out;
	unsigned int data_phase_addr;
	int i;

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, true);

	for (i = 0; i < buf_end; i++) {
		data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
		if (i + 1 == buf_end)
			data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;

		writel(buf32[i], nfc->io_regs + data_phase_addr);
	}
	/* The extra flags cannot be applied to the trailing unaligned accesses */
	for (i = in_start; i < len; i++)
		writeb(buf8[i], nfc->io_regs + PL35X_SMC_DATA_PHASE);

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, false);
}

static int pl35x_nand_correct_data(struct pl35x_nandc *nfc, unsigned char *buf,
				   unsigned char *read_ecc,
				   unsigned char *calc_ecc)
{
	unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
	unsigned short calc_ecc_lower, calc_ecc_upper;
	unsigned short byte_addr, bit_addr;

	read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
			 PL35X_NAND_ECC_BITS_MASK;
	read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
			 PL35X_NAND_ECC_BITS_MASK;
	calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
			 PL35X_NAND_ECC_BITS_MASK;
	calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
			 PL35X_NAND_ECC_BITS_MASK;

	ecc_odd = read_ecc_lower ^ calc_ecc_lower;
	ecc_even = read_ecc_upper ^ calc_ecc_upper;

	/* No error */
	if (likely(!ecc_odd && !ecc_even))
		return 0;

	/* One error in the main data; to be corrected */
	if (ecc_odd == (~ecc_even & PL35X_NAND_ECC_BITS_MASK)) {
		/* Bits [11:3] of error code give the byte offset */
		byte_addr = (ecc_odd >> 3) & PL35X_NAND_ECC_BYTE_OFF_MASK;
		/* Bits [2:0] of error code give the bit offset */
		bit_addr = ecc_odd & PL35X_NAND_ECC_BIT_OFF_MASK;
		/* Toggle the faulty bit */
		buf[byte_addr] ^= (BIT(bit_addr));

		return 1;
	}

	/* One error in the ECC data; no action needed */
	if (hweight32(ecc_odd | ecc_even) == 1)
		return 1;

	return -EBADMSG;
}
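/*
 * A worked example of the single-bit repair above, given the Hamming
 * property the check relies on: a flip of bit 0 in data byte 5 makes
 * ecc_odd equal the flipped bit position, 40 (0b000000101000), while
 * ecc_even holds its 12-bit complement, so ecc_odd == (~ecc_even & 0xFFF),
 * byte_addr = 40 >> 3 = 5, bit_addr = 40 & 0x7 = 0, and buf[5] ^= BIT(0)
 * restores the data.
 */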
static void pl35x_nand_ecc_reg_to_array(struct nand_chip *chip, u32 ecc_reg,
					u8 *ecc_array)
{
	u32 ecc_value = ~ecc_reg;
	unsigned int ecc_byte;

	for (ecc_byte = 0; ecc_byte < chip->ecc.bytes; ecc_byte++)
		ecc_array[ecc_byte] = ecc_value >> (8 * ecc_byte);
}

static int pl35x_nand_read_eccbytes(struct pl35x_nandc *nfc,
				    struct nand_chip *chip, u8 *read_ecc)
{
	u32 ecc_value;
	int chunk;

	for (chunk = 0; chunk < chip->ecc.steps;
	     chunk++, read_ecc += chip->ecc.bytes) {
		ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
		if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
			return -EINVAL;

		pl35x_nand_ecc_reg_to_array(chip, ecc_value, read_ecc);
	}

	return 0;
}

static int pl35x_nand_recover_data_hwecc(struct pl35x_nandc *nfc,
					 struct nand_chip *chip, u8 *data,
					 u8 *read_ecc)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0, chunk;
	u8 calc_ecc[3];
	u32 ecc_value;
	int stats;

	for (chunk = 0; chunk < chip->ecc.steps;
	     chunk++, data += chip->ecc.size, read_ecc += chip->ecc.bytes) {
		/* Read ECC value for each chunk */
		ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));

		if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
			return -EINVAL;

		if (PL35X_SMC_ECC_VALUE_HAS_FAILED(ecc_value)) {
			mtd->ecc_stats.failed++;
			continue;
		}

		pl35x_nand_ecc_reg_to_array(chip, ecc_value, calc_ecc);
		stats = pl35x_nand_correct_data(nfc, data, read_ecc, calc_ecc);
		if (stats < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stats;
			max_bitflips = max_t(unsigned int, max_bitflips, stats);
		}
	}

	return max_bitflips;
}

static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
				       const u8 *buf, int oob_required,
				       int page)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
	unsigned int nrows = plnand->addr_cycles;
	u32 addr1 = 0, addr2 = 0, row;
	u32 cmd_addr;
	int i, ret;
	u8 status;

	ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
	if (ret)
		return ret;

	cmd_addr = PL35X_SMC_CMD_PHASE |
		   PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
		   PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_SEQIN);

	for (i = 0, row = first_row; row < nrows; i++, row++) {
		u8 addr = page >> ((i * 8) & 0xFF);

		if (row < 4)
			addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
		else
			addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
	}

	/* Send the command and address cycles */
	writel(addr1, nfc->io_regs + cmd_addr);
	if (plnand->addr_cycles > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* Write the data with the engine enabled */
	pl35x_nand_write_data_op(chip, buf, mtd->writesize, false,
				 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
	ret = pl35x_smc_wait_for_ecc_done(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Copy the HW calculated ECC bytes in the OOB buffer */
	ret = pl35x_nand_read_eccbytes(nfc, chip, nfc->ecc_buf);
	if (ret)
		goto disable_ecc_engine;

	if (!oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	ret = mtd_ooblayout_set_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi,
					 0, chip->ecc.total);
	if (ret)
		goto disable_ecc_engine;

	/* Write the spare area with ECC bytes */
	pl35x_nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false, 0,
				 PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_PAGEPROG) |
				 PL35X_SMC_CMD_PHASE_CMD1_VALID |
				 PL35X_SMC_DATA_PHASE_CLEAR_CS);
	ret = pl35x_smc_wait_for_irq(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Check write status on the chip side */
	ret = nand_status_op(chip, &status);
	if (ret)
		goto disable_ecc_engine;

	if (status & NAND_STATUS_FAIL)
		ret = -EIO;

disable_ecc_engine:
	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	return ret;
}
/*
 * This function reads data and checks the data integrity by comparing the
 * hardware-generated ECC values with the ECC values read from the spare area.
 *
 * There is a limitation with the SMC controller: ECC_LAST must be set on the
 * last data access to tell the ECC engine not to expect any further data.
 * In practice, this implies shrinking the last data transfer by e.g. 4 bytes,
 * and doing a last 4-byte transfer with the additional bit set. The last block
 * should be aligned with the end of an ECC block. Because of this limitation,
 * it is not possible to use the core routines.
 */
static int pl35x_nand_read_page_hwecc(struct nand_chip *chip,
				      u8 *buf, int oob_required, int page)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
	unsigned int nrows = plnand->addr_cycles;
	unsigned int addr1 = 0, addr2 = 0, row;
	u32 cmd_addr;
	int i, ret;

	ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
	if (ret)
		return ret;

	cmd_addr = PL35X_SMC_CMD_PHASE |
		   PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
		   PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_READ0) |
		   PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_READSTART) |
		   PL35X_SMC_CMD_PHASE_CMD1_VALID;

	for (i = 0, row = first_row; row < nrows; i++, row++) {
		u8 addr = page >> ((i * 8) & 0xFF);

		if (row < 4)
			addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
		else
			addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
	}

	/* Send the command and address cycles */
	writel(addr1, nfc->io_regs + cmd_addr);
	if (plnand->addr_cycles > 4)
		writel(addr2, nfc->io_regs + cmd_addr);
	/* Wait for the data to be available in the NAND cache */
	ndelay(PSEC_TO_NSEC(sdr->tRR_min));

	ret = pl35x_smc_wait_for_irq(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Retrieve the raw data with the engine enabled */
	pl35x_nand_read_data_op(chip, buf, mtd->writesize, false,
				0, PL35X_SMC_DATA_PHASE_ECC_LAST);
	ret = pl35x_smc_wait_for_ecc_done(nfc);
	if (ret)
		goto disable_ecc_engine;

	/* Retrieve the stored ECC bytes */
	pl35x_nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
	ret = mtd_ooblayout_get_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		goto disable_ecc_engine;

	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	/* Correct the data and report failures */
	return pl35x_nand_recover_data_hwecc(nfc, chip, buf, nfc->ecc_buf);

disable_ecc_engine:
	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);

	return ret;
}
static int pl35x_nand_exec_op(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	const struct nand_op_instr *instr, *data_instr = NULL;
	unsigned int rdy_tim_ms = 0, naddrs = 0, cmds = 0, last_flags = 0;
	u32 addr1 = 0, addr2 = 0, cmd0 = 0, cmd1 = 0, cmd_addr = 0;
	unsigned int op_id, len, offset, rdy_del_ns;
	int last_instr_type = -1;
	bool cmd1_valid = false;
	const u8 *addrs;
	int i, ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (!cmds) {
				cmd0 = PL35X_SMC_CMD_PHASE_CMD0(instr->ctx.cmd.opcode);
			} else {
				cmd1 = PL35X_SMC_CMD_PHASE_CMD1(instr->ctx.cmd.opcode);
				if (last_instr_type != NAND_OP_DATA_OUT_INSTR)
					cmd1_valid = true;
			}
			cmds++;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			cmd_addr |= PL35X_SMC_CMD_PHASE_NADDRS(naddrs);

			for (i = offset; i < naddrs; i++) {
				if (i < 4)
					addr1 |= PL35X_SMC_CMD_PHASE_ADDR(i, addrs[i]);
				else
					addr2 |= PL35X_SMC_CMD_PHASE_ADDR(i - 4, addrs[i]);
			}
			break;

		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			data_instr = instr;
			len = nand_subop_get_data_len(subop, op_id);
			break;

		case NAND_OP_WAITRDY_INSTR:
			rdy_tim_ms = instr->ctx.waitrdy.timeout_ms;
			rdy_del_ns = instr->delay_ns;
			break;
		}

		last_instr_type = instr->type;
	}

	/* Command phase */
	cmd_addr |= PL35X_SMC_CMD_PHASE | cmd0 | cmd1 |
		    (cmd1_valid ? PL35X_SMC_CMD_PHASE_CMD1_VALID : 0);
	writel(addr1, nfc->io_regs + cmd_addr);
	if (naddrs > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* Data phase */
	if (data_instr && data_instr->type == NAND_OP_DATA_OUT_INSTR) {
		last_flags = PL35X_SMC_DATA_PHASE_CLEAR_CS;
		if (cmds == 2)
			last_flags |= cmd1 | PL35X_SMC_CMD_PHASE_CMD1_VALID;

		pl35x_nand_write_data_op(chip, data_instr->ctx.data.buf.out,
					 len, data_instr->ctx.data.force_8bit,
					 0, last_flags);
	}

	if (rdy_tim_ms) {
		ndelay(rdy_del_ns);
		ret = pl35x_smc_wait_for_irq(nfc);
		if (ret)
			return ret;
	}

	if (data_instr && data_instr->type == NAND_OP_DATA_IN_INSTR)
		pl35x_nand_read_data_op(chip, data_instr->ctx.data.buf.in,
					len, data_instr->ctx.data.force_8bit,
					0, PL35X_SMC_DATA_PHASE_CLEAR_CS);

	return 0;
}

static const struct nand_op_parser pl35x_nandc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2112)),
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);
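/*
 * As an example of how the patterns above match: a standard page read issued
 * by the core (READ0 + up to 7 address cycles + READSTART + wait-ready +
 * data-in) is absorbed by the first pattern and handed to
 * pl35x_nand_exec_op() as a single subop, while a page program sequence
 * (SEQIN + addresses + data-out + PAGEPROG + wait-ready) matches the second.
 */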
static int pl35x_nfc_exec_op(struct nand_chip *chip,
			     const struct nand_operation *op,
			     bool check_only)
{
	if (!check_only)
		pl35x_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &pl35x_nandc_op_parser,
				      op, check_only);
}

static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs,
				     const struct nand_interface_config *conf)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct pl35x_nand_timings tmgs = {};
	const struct nand_sdr_timings *sdr;
	unsigned int period_ns, val;
	struct clk *mclk;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	mclk = of_clk_get_by_name(nfc->dev->parent->of_node, "memclk");
	if (IS_ERR(mclk)) {
		dev_err(nfc->dev, "Failed to retrieve SMC memclk\n");
		return PTR_ERR(mclk);
	}

	/*
	 * SDR timings are given in pico-seconds while NFC timings must be
	 * expressed in NAND controller clock cycles. We use the TO_CYCLES()
	 * macro to convert from one to the other.
	 */
	period_ns = NSEC_PER_SEC / clk_get_rate(mclk);

	/* The clock was only needed to derive the period, drop the reference */
	clk_put(mclk);
	/*
	 * PL35X SMC needs one extra read cycle in SDR Mode 5. This is not
	 * written anywhere in the datasheet but is an empirical observation.
	 */
	val = TO_CYCLES(sdr->tRC_min, period_ns);
	if (sdr->tRC_min <= 20000)
		val++;

	tmgs.t_rc = val;
	if (tmgs.t_rc != val || tmgs.t_rc < 2)
		return -EINVAL;

	val = TO_CYCLES(sdr->tWC_min, period_ns);
	tmgs.t_wc = val;
	if (tmgs.t_wc != val || tmgs.t_wc < 2)
		return -EINVAL;
	/*
	 * For all SDR modes, the PL35X SMC needs tREA_max to be 1 cycle;
	 * this is also an empirical result.
	 */
	tmgs.t_rea = 1;

	val = TO_CYCLES(sdr->tWP_min, period_ns);
	tmgs.t_wp = val;
	if (tmgs.t_wp != val || tmgs.t_wp < 1)
		return -EINVAL;

	val = TO_CYCLES(sdr->tCLR_min, period_ns);
	tmgs.t_clr = val;
	if (tmgs.t_clr != val)
		return -EINVAL;

	val = TO_CYCLES(sdr->tAR_min, period_ns);
	tmgs.t_ar = val;
	if (tmgs.t_ar != val)
		return -EINVAL;

	val = TO_CYCLES(sdr->tRR_min, period_ns);
	tmgs.t_rr = val;
	if (tmgs.t_rr != val)
		return -EINVAL;

	if (cs == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	plnand->timings = PL35X_SMC_NAND_TRC_CYCLES(tmgs.t_rc) |
			  PL35X_SMC_NAND_TWC_CYCLES(tmgs.t_wc) |
			  PL35X_SMC_NAND_TREA_CYCLES(tmgs.t_rea) |
			  PL35X_SMC_NAND_TWP_CYCLES(tmgs.t_wp) |
			  PL35X_SMC_NAND_TCLR_CYCLES(tmgs.t_clr) |
			  PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) |
			  PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr);

	return 0;
}
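/*
 * A worked conversion, assuming a 100 MHz memclk (period_ns = 10): for SDR
 * mode 5, tRC_min = 20000 ps gives TO_CYCLES(20000, 10) = 2, and the
 * empirical tRC_min <= 20000 rule above bumps t_rc to 3 cycles; tWP_min =
 * 10000 ps gives t_wp = 1, the minimum the checks accept.
 */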
static void pl35x_smc_set_ecc_pg_size(struct pl35x_nandc *nfc,
				      struct nand_chip *chip,
				      unsigned int pg_sz)
{
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	u32 sz;

	switch (pg_sz) {
	case SZ_512:
		sz = 1;
		break;
	case SZ_1K:
		sz = 2;
		break;
	case SZ_2K:
		sz = 3;
		break;
	default:
		sz = 0;
		break;
	}

	plnand->ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
	plnand->ecc_cfg &= ~PL35X_SMC_ECC_CFG_PGSIZE_MASK;
	plnand->ecc_cfg |= sz;
	writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
}

static int pl35x_nand_init_hw_ecc_controller(struct pl35x_nandc *nfc,
					     struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;

	if (mtd->writesize < SZ_512 || mtd->writesize > SZ_2K) {
		dev_err(nfc->dev,
			"The hardware ECC engine is limited to pages up to 2kiB\n");
		return -EOPNOTSUPP;
	}

	chip->ecc.strength = 1;
	chip->ecc.bytes = 3;
	chip->ecc.size = SZ_512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	chip->ecc.read_page = pl35x_nand_read_page_hwecc;
	chip->ecc.write_page = pl35x_nand_write_page_hwecc;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
	pl35x_smc_set_ecc_pg_size(nfc, chip, mtd->writesize);

	nfc->ecc_buf = devm_kmalloc(nfc->dev, chip->ecc.bytes * chip->ecc.steps,
				    GFP_KERNEL);
	if (!nfc->ecc_buf)
		return -ENOMEM;

	switch (mtd->oobsize) {
	case 16:
		/* Legacy Xilinx layout */
		mtd_set_ooblayout(mtd, &pl35x_ecc_ooblayout16_ops);
		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
		break;
	case 64:
		mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
		break;
	default:
		dev_err(nfc->dev, "Unsupported OOB size\n");
		return -EOPNOTSUPP;
	}

	return ret;
}

static int pl35x_nand_attach_chip(struct nand_chip *chip)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
	    (!chip->ecc.size || !chip->ecc.strength)) {
		if (requirements->step_size && requirements->strength) {
			chip->ecc.size = requirements->step_size;
			chip->ecc.strength = requirements->strength;
		} else {
			dev_info(nfc->dev,
				 "No minimum ECC strength, using 1b/512B\n");
			chip->ecc.size = 512;
			chip->ecc.strength = 1;
		}
	}

	if (mtd->writesize <= SZ_512)
		plnand->addr_cycles = 1;
	else
		plnand->addr_cycles = 2;

	if (chip->options & NAND_ROW_ADDR_3)
		plnand->addr_cycles += 3;
	else
		plnand->addr_cycles += 2;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		/* Keep these legacy BBT descriptors for ON_DIE situations */
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
		fallthrough;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = pl35x_nand_init_hw_ecc_controller(nfc, chip);
		if (ret)
			return ret;
		break;
	default:
		dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
			chip->ecc.engine_type);
		return -EINVAL;
	}

	return 0;
}
static const struct nand_controller_ops pl35x_nandc_ops = {
	.attach_chip = pl35x_nand_attach_chip,
	.exec_op = pl35x_nfc_exec_op,
	.setup_interface = pl35x_nfc_setup_interface,
};

static int pl35x_nand_reset_state(struct pl35x_nandc *nfc)
{
	int ret;

	/* Disable interrupts and clear their status */
	writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 |
	       PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 |
	       PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1,
	       nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);

	/* Set default bus width to 8-bit */
	ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
	if (ret)
		return ret;

	/* Ensure the ECC controller is bypassed by default */
	ret = pl35x_smc_set_ecc_mode(nfc, NULL, PL35X_SMC_ECC_CFG_MODE_BYPASS);
	if (ret)
		return ret;

	/*
	 * Configure the commands that the ECC block uses to detect the
	 * operations it should start/end.
	 */
	writel(PL35X_SMC_ECC_CMD1_WRITE(NAND_CMD_SEQIN) |
	       PL35X_SMC_ECC_CMD1_READ(NAND_CMD_READ0) |
	       PL35X_SMC_ECC_CMD1_READ_END(NAND_CMD_READSTART) |
	       PL35X_SMC_ECC_CMD1_READ_END_VALID(NAND_CMD_READ1),
	       nfc->conf_regs + PL35X_SMC_ECC_CMD1);
	writel(PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(NAND_CMD_RNDIN) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG(NAND_CMD_RNDOUT) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(NAND_CMD_RNDOUTSTART) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(NAND_CMD_READ1),
	       nfc->conf_regs + PL35X_SMC_ECC_CMD2);

	return 0;
}
static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
				struct device_node *np)
{
	struct pl35x_nand *plnand;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	u32 cs;
	int ret;

	plnand = devm_kzalloc(nfc->dev, sizeof(*plnand), GFP_KERNEL);
	if (!plnand)
		return -ENOMEM;

	ret = of_property_read_u32(np, "reg", &cs);
	if (ret)
		return ret;

	if (cs >= PL35X_NAND_MAX_CS) {
		dev_err(nfc->dev, "Wrong CS %u\n", cs);
		return -EINVAL;
	}

	if (test_and_set_bit(cs, &nfc->assigned_cs)) {
		dev_err(nfc->dev, "Already assigned CS %u\n", cs);
		return -EINVAL;
	}

	plnand->cs = cs;

	chip = &plnand->chip;
	chip->options = NAND_BUSWIDTH_AUTO | NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
	chip->bbt_options = NAND_BBT_USE_FLASH;
	chip->controller = &nfc->controller;
	mtd = nand_to_mtd(chip);
	mtd->dev.parent = nfc->dev;
	nand_set_flash_node(chip, np);
	if (!mtd->name) {
		mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
					   "%s", PL35X_NANDC_DRIVER_NAME);
		if (!mtd->name) {
			dev_err(nfc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&plnand->node, &nfc->chips);

	return ret;
}
static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
{
	struct pl35x_nand *plnand, *tmp;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) {
		chip = &plnand->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		list_del(&plnand->node);
	}
}

static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
	struct device_node *np = nfc->dev->of_node;
	int nchips = of_get_child_count(np);
	int ret;

	if (!nchips || nchips > PL35X_NAND_MAX_CS) {
		dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
			nchips);
		return -EINVAL;
	}

	for_each_child_of_node_scoped(np, nand_np) {
		ret = pl35x_nand_chip_init(nfc, nand_np);
		if (ret) {
			pl35x_nand_chips_cleanup(nfc);
			break;
		}
	}

	return ret;
}
static int pl35x_nand_probe(struct platform_device *pdev)
{
	struct device *smc_dev = pdev->dev.parent;
	struct amba_device *smc_amba = to_amba_device(smc_dev);
	struct pl35x_nandc *nfc;
	int ret;

	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = &pdev->dev;
	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &pl35x_nandc_ops;
	INIT_LIST_HEAD(&nfc->chips);

	nfc->conf_regs = devm_ioremap_resource(&smc_amba->dev, &smc_amba->res);
	if (IS_ERR(nfc->conf_regs))
		return PTR_ERR(nfc->conf_regs);

	nfc->io_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(nfc->io_regs))
		return PTR_ERR(nfc->io_regs);

	ret = pl35x_nand_reset_state(nfc);
	if (ret)
		return ret;

	ret = pl35x_nand_chips_init(nfc);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, nfc);

	return 0;
}
static void pl35x_nand_remove(struct platform_device *pdev)
{
	struct pl35x_nandc *nfc = platform_get_drvdata(pdev);

	pl35x_nand_chips_cleanup(nfc);
}

static const struct of_device_id pl35x_nand_of_match[] = {
	{ .compatible = "arm,pl353-nand-r2p1" },
	{},
};
MODULE_DEVICE_TABLE(of, pl35x_nand_of_match);
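/*
 * For reference, a minimal device tree layout matching this driver's probe
 * path: conf_regs come from the parent AMBA SMC device, io_regs from the
 * controller's own "reg", and each child's "reg" is its chip select
 * (checked against PL35X_NAND_MAX_CS). Node names and addresses below are
 * illustrative assumptions, not a dts excerpt.
 *
 *	smcc: memory-controller@e000e000 {
 *		compatible = "arm,pl353-smc-r2p1", "arm,primecell";
 *		...
 *		nfc0: nand-controller@e1000000 {
 *			compatible = "arm,pl353-nand-r2p1";
 *			reg = <0xe1000000 0x1000000>;
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			nand@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 */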
static struct platform_driver pl35x_nandc_driver = {
	.probe = pl35x_nand_probe,
	.remove_new = pl35x_nand_remove,
	.driver = {
		.name = PL35X_NANDC_DRIVER_NAME,
		.of_match_table = pl35x_nand_of_match,
	},
};
module_platform_driver(pl35x_nandc_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
MODULE_LICENSE("GPL");