davinci_nand.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * davinci_nand.c - NAND Flash Driver for DaVinci family chips
  4. *
  5. * Copyright © 2006 Texas Instruments.
  6. *
  7. * Port to 2.6.23 Copyright © 2008 by:
  8. * Sander Huijsen <Shuijsen@optelecom-nkf.com>
  9. * Troy Kisky <troy.kisky@boundarydevices.com>
  10. * Dirk Behme <Dirk.Behme@gmail.com>
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/module.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/err.h>
  16. #include <linux/iopoll.h>
  17. #include <linux/mtd/rawnand.h>
  18. #include <linux/mtd/partitions.h>
  19. #include <linux/slab.h>
  20. #include <linux/of.h>
/* AEMIF NAND controller register offsets (relative to info->base) */
#define NRCSR_OFFSET		0x00	/* revision code register */
#define NANDFCR_OFFSET		0x60	/* NAND flash control register */
#define NANDFSR_OFFSET		0x64	/* NAND flash status register */
#define NANDF1ECC_OFFSET	0x70	/* 1-bit ECC for CS2; CS3..CS5 follow */

/* 4-bit ECC syndrome registers */
#define NAND_4BIT_ECC_LOAD_OFFSET	0xbc
#define NAND_4BIT_ECC1_OFFSET		0xc0
#define NAND_4BIT_ECC2_OFFSET		0xc4
#define NAND_4BIT_ECC3_OFFSET		0xc8
#define NAND_4BIT_ECC4_OFFSET		0xcc
#define NAND_ERR_ADD1_OFFSET		0xd0
#define NAND_ERR_ADD2_OFFSET		0xd4
#define NAND_ERR_ERRVAL1_OFFSET		0xd8
#define NAND_ERR_ERRVAL2_OFFSET		0xdc

/* NOTE: boards don't need to use these address bits
 * for ALE/CLE unless they support booting from NAND.
 * They're used unless platform data overrides them.
 */
#define MASK_ALE	0x08
#define MASK_CLE	0x10
/*
 * Board-specific configuration for one DaVinci NAND instance; either
 * supplied directly as platform data or synthesized from device-tree
 * properties by nand_davinci_get_pdata().
 */
struct davinci_nand_pdata {
	/* Address bits driving the ALE and CLE latch signals */
	uint32_t mask_ale;
	uint32_t mask_cle;

	/*
	 * 0-indexed chip-select number of the asynchronous
	 * interface to which the NAND device has been connected.
	 *
	 * So, if you have NAND connected to CS3 of DA850, you
	 * will pass '1' here. Since the asynchronous interface
	 * on DA850 starts from CS2.
	 */
	uint32_t core_chipsel;

	/* for packages using two chipselects */
	uint32_t mask_chipsel;

	/* board's default static partition info */
	struct mtd_partition *parts;
	unsigned int nr_parts;

	/* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
	 * soft == NAND_ECC_ENGINE_TYPE_SOFT
	 * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
	 *
	 * All DaVinci-family chips support 1-bit hardware ECC.
	 * Newer ones also support 4-bit ECC, but are awkward
	 * using it with large page chips.
	 */
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement ecc_placement;
	u8 ecc_bits;

	/* e.g. NAND_BUSWIDTH_16 */
	unsigned int options;
	/* e.g. NAND_BBT_USE_FLASH */
	unsigned int bbt_options;

	/* Main and mirror bbt descriptor overrides */
	struct nand_bbt_descr *bbt_td;
	struct nand_bbt_descr *bbt_md;
};
  77. /*
  78. * This is a device driver for the NAND flash controller found on the
  79. * various DaVinci family chips. It handles up to four SoC chipselects,
  80. * and some flavors of secondary chipselect (e.g. based on A12) as used
  81. * with multichip packages.
  82. *
  83. * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
  84. * available on chips like the DM355 and OMAP-L137 and needed with the
  85. * more error-prone MLC NAND chips.
  86. *
  87. * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
  88. * outputs in a "wire-AND" configuration, with no per-chip signals.
  89. */
/* Per-instance driver state */
struct davinci_nand_info {
	struct nand_controller	controller;
	struct nand_chip	chip;
	struct platform_device	*pdev;

	/* set by hwctl_4bit(): 4-bit ECC engine was armed for a page read */
	bool			is_readmode;

	void __iomem		*base;		/* controller register window */
	void __iomem		*vaddr;		/* first chip-select data window */
	void __iomem		*current_cs;	/* data window selected by exec_op() */

	uint32_t		mask_chipsel;
	uint32_t		mask_ale;
	uint32_t		mask_cle;

	uint32_t		core_chipsel;
};

/* Serializes NANDFCR read-modify-write cycles and the ecc4_busy flag */
static DEFINE_SPINLOCK(davinci_nand_lock);
/* The 4-bit ECC engine is shared AEMIF-wide; only one chip may claim it */
static bool ecc4_busy;
  105. static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
  106. {
  107. return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
  108. }
  109. static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
  110. int offset)
  111. {
  112. return __raw_readl(info->base + offset);
  113. }
  114. static inline void davinci_nand_writel(struct davinci_nand_info *info,
  115. int offset, unsigned long value)
  116. {
  117. __raw_writel(value, info->base + offset);
  118. }
  119. /*----------------------------------------------------------------------*/
  120. /*
  121. * 1-bit hardware ECC ... context maintained for each core chipselect
  122. */
  123. static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
  124. {
  125. struct davinci_nand_info *info = to_davinci_nand(mtd);
  126. return davinci_nand_readl(info, NANDF1ECC_OFFSET
  127. + 4 * info->core_chipsel);
  128. }
  129. static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
  130. {
  131. struct davinci_nand_info *info;
  132. uint32_t nandcfr;
  133. unsigned long flags;
  134. info = to_davinci_nand(nand_to_mtd(chip));
  135. /* Reset ECC hardware */
  136. nand_davinci_readecc_1bit(nand_to_mtd(chip));
  137. spin_lock_irqsave(&davinci_nand_lock, flags);
  138. /* Restart ECC hardware */
  139. nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
  140. nandcfr |= BIT(8 + info->core_chipsel);
  141. davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
  142. spin_unlock_irqrestore(&davinci_nand_lock, flags);
  143. }
  144. /*
  145. * Read hardware ECC value and pack into three bytes
  146. */
  147. static int nand_davinci_calculate_1bit(struct nand_chip *chip,
  148. const u_char *dat, u_char *ecc_code)
  149. {
  150. unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
  151. unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
  152. /* invert so that erased block ecc is correct */
  153. ecc24 = ~ecc24;
  154. ecc_code[0] = (u_char)(ecc24);
  155. ecc_code[1] = (u_char)(ecc24 >> 8);
  156. ecc_code[2] = (u_char)(ecc24 >> 16);
  157. return 0;
  158. }
  159. static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
  160. u_char *read_ecc, u_char *calc_ecc)
  161. {
  162. uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
  163. (read_ecc[2] << 16);
  164. uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
  165. (calc_ecc[2] << 16);
  166. uint32_t diff = eccCalc ^ eccNand;
  167. if (diff) {
  168. if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
  169. /* Correctable error */
  170. if ((diff >> (12 + 3)) < chip->ecc.size) {
  171. dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
  172. return 1;
  173. } else {
  174. return -EBADMSG;
  175. }
  176. } else if (!(diff & (diff - 1))) {
  177. /* Single bit ECC error in the ECC itself,
  178. * nothing to fix */
  179. return 1;
  180. } else {
  181. /* Uncorrectable error */
  182. return -EBADMSG;
  183. }
  184. }
  185. return 0;
  186. }
  187. /*----------------------------------------------------------------------*/
  188. /*
  189. * 4-bit hardware ECC ... context maintained over entire AEMIF
  190. *
  191. * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
  192. * since that forces use of a problematic "infix OOB" layout.
  193. * Among other things, it trashes manufacturer bad block markers.
  194. * Also, and specific to this hardware, it ECC-protects the "prepad"
  195. * in the OOB ... while having ECC protection for parts of OOB would
  196. * seem useful, the current MTD stack sometimes wants to update the
  197. * OOB without recomputing ECC.
  198. */
  199. static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
  200. {
  201. struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
  202. unsigned long flags;
  203. u32 val;
  204. /* Reset ECC hardware */
  205. davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
  206. spin_lock_irqsave(&davinci_nand_lock, flags);
  207. /* Start 4-bit ECC calculation for read/write */
  208. val = davinci_nand_readl(info, NANDFCR_OFFSET);
  209. val &= ~(0x03 << 4);
  210. val |= (info->core_chipsel << 4) | BIT(12);
  211. davinci_nand_writel(info, NANDFCR_OFFSET, val);
  212. info->is_readmode = (mode == NAND_ECC_READ);
  213. spin_unlock_irqrestore(&davinci_nand_lock, flags);
  214. }
  215. /* Read raw ECC code after writing to NAND. */
  216. static void
  217. nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
  218. {
  219. const u32 mask = 0x03ff03ff;
  220. code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
  221. code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
  222. code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
  223. code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
  224. }
/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct nand_chip *chip,
				       const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, terminate ECC calculation by a dummy read
	 * of some 4-bit ECC register. ECC covers everything that
	 * was read; correct() just uses the hardware state, so
	 * ecc_code is not needed.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ecc values into ten bytes, making
	 * two passes which each convert four values (in upper and
	 * lower halves of two 32-bit words) into five bytes. The
	 * ROM boot loader uses this same packing scheme.
	 */
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ =   p[0]        & 0xff;
		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ =  (p[1] >> 18) & 0xff;
	}

	return 0;
}
/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 *
 * Returns the number of corrected bitflips, 0 if the syndrome is clean,
 * or -EBADMSG when five or more errors were detected.
 */
static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
				     u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	u32 ecc_state;
	unsigned num_errors, corrected;
	unsigned long timeo;

	/* Unpack ten bytes into eight 10 bit values. We know we're
	 * little-endian, and use type punning for less shifting/masking.
	 */
	if (WARN_ON(0x01 & (uintptr_t)ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes 0 means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data while this is working,
	 * to speed up the overall page read.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

	/*
	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
	 * begin trying to poll for the state, you may fall right out of your
	 * loop without any of the correction calculations having taken place.
	 * The recommendation from the hardware team is to initially delay as
	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
	 * correction state.
	 */
	timeo = jiffies + usecs_to_jiffies(100);
	do {
		ecc_state = (davinci_nand_readl(info,
				NANDFSR_OFFSET) >> 8) & 0x0f;
		cpu_relax();
	} while ((ecc_state < 4) && time_before(jiffies, timeo));

	/* NOTE(review): this loop has no timeout; it relies on the hardware
	 * always leaving the "still working" states — confirm against TRM.
	 */
	for (;;) {
		u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		switch ((fsr >> 8) & 0x0f) {
		case 0:	/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:	/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EBADMSG;
		case 2:	/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		/* Errors 0/1 are packed in the ADD1/ERRVAL1 register pair,
		 * errors 2/3 in ADD2/ERRVAL2; odd entries use the high half.
		 */
		if (i > 1) {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL1_OFFSET);
		}

		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		/* Addresses count down from the end of the 512+10 byte
		 * codeword; only flip bits that land inside the data area.
		 */
		error_address &= 0x3ff;
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}
  366. /*----------------------------------------------------------------------*/
  367. /* An ECC layout for using 4-bit ECC with small-page flash, storing
  368. * ten ECC bytes plus the manufacturer's bad block marker byte, and
  369. * and not overlapping the default BBT markers.
  370. */
  371. static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
  372. struct mtd_oob_region *oobregion)
  373. {
  374. if (section > 2)
  375. return -ERANGE;
  376. if (!section) {
  377. oobregion->offset = 0;
  378. oobregion->length = 5;
  379. } else if (section == 1) {
  380. oobregion->offset = 6;
  381. oobregion->length = 2;
  382. } else {
  383. oobregion->offset = 13;
  384. oobregion->length = 3;
  385. }
  386. return 0;
  387. }
  388. static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
  389. struct mtd_oob_region *oobregion)
  390. {
  391. if (section > 1)
  392. return -ERANGE;
  393. if (!section) {
  394. oobregion->offset = 8;
  395. oobregion->length = 5;
  396. } else {
  397. oobregion->offset = 16;
  398. oobregion->length = mtd->oobsize - 16;
  399. }
  400. return 0;
  401. }
/* OOB layout callbacks for 4-bit ECC on small-page (512 B) chips */
static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
	.ecc = hwecc4_ooblayout_small_ecc,
	.free = hwecc4_ooblayout_small_free,
};
#if defined(CONFIG_OF)
/* Device-tree match table; "ti,keystone-nand" also disables subpage writes */
static const struct of_device_id davinci_nand_of_match[] = {
	{.compatible = "ti,davinci-nand", },
	{.compatible = "ti,keystone-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
/*
 * Return the board's platform data, synthesizing it from device-tree
 * properties when none was supplied.  The result is cached in
 * dev->platform_data, so repeated calls are cheap.  Returns an ERR_PTR
 * on allocation failure or when the mandatory chipselect is missing.
 */
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
		struct davinci_nand_pdata *pdata;
		const char *mode;
		u32 prop;

		pdata = devm_kzalloc(&pdev->dev,
				sizeof(struct davinci_nand_pdata),
				GFP_KERNEL);
		/* cached before the NULL check; stays NULL on OOM */
		pdev->dev.platform_data = pdata;
		if (!pdata)
			return ERR_PTR(-ENOMEM);

		/* the chipselect property is mandatory */
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-chipselect", &prop))
			pdata->core_chipsel = prop;
		else
			return ERR_PTR(-EINVAL);

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-ale", &prop))
			pdata->mask_ale = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-cle", &prop))
			pdata->mask_cle = prop;
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-mask-chipsel", &prop))
			pdata->mask_chipsel = prop;

		/* "none" / "soft" / "hw" — see davinci_nand_pdata */
		if (!of_property_read_string(pdev->dev.of_node,
			"ti,davinci-ecc-mode", &mode)) {
			if (!strncmp("none", mode, 4))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
			if (!strncmp("soft", mode, 4))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
			if (!strncmp("hw", mode, 2))
				pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
		}
		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-ecc-bits", &prop))
			pdata->ecc_bits = prop;

		if (!of_property_read_u32(pdev->dev.of_node,
			"ti,davinci-nand-buswidth", &prop) && prop == 16)
			pdata->options |= NAND_BUSWIDTH_16;

		if (of_property_read_bool(pdev->dev.of_node,
					  "ti,davinci-nand-use-bbt"))
			pdata->bbt_options = NAND_BBT_USE_FLASH;

		/*
		 * Since kernel v4.8, this driver has been fixed to enable
		 * use of 4-bit hardware ECC with subpages and verified on
		 * TI's keystone EVMs (K2L, K2HK and K2E).
		 * However, in the interest of not breaking systems using
		 * existing UBI partitions, sub-page writes are not being
		 * (re)enabled. If you want to use subpage writes on Keystone
		 * platforms (i.e. do not have any existing UBI partitions),
		 * then use "ti,davinci-nand" as the compatible in your
		 * device-tree file.
		 */
		if (of_device_is_compatible(pdev->dev.of_node,
					    "ti,keystone-nand")) {
			pdata->options |= NAND_NO_SUBPAGE_WRITE;
		}
	}

	return dev_get_platdata(&pdev->dev);
}
#else
/* Without OF support, boards must pass platform data explicitly. */
static struct davinci_nand_pdata
	*nand_davinci_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}
#endif
/*
 * Apply the board-supplied ECC configuration to a freshly detected chip.
 * Called by the rawnand core from nand_scan() before MTD registration.
 * Returns 0, -EINVAL for bad/too-small configurations, -EBUSY when the
 * shared 4-bit engine is already claimed, or -EIO for unsupported page
 * geometries.
 */
static int davinci_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
	int ret = 0;

	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* Use board-specific ECC config */
	chip->ecc.engine_type = pdata->engine_type;
	chip->ecc.placement = pdata->ecc_placement;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		pdata->ecc_bits = 0;
		/*
		 * This driver expects Hamming based ECC when engine_type is set
		 * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
		 * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
		 * field to davinci_nand_pdata.
		 */
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		if (pdata->ecc_bits == 4) {
			/* number of 512-byte ECC chunks per page */
			int chunks = mtd->writesize / 512;

			if (!chunks || mtd->oobsize < 16) {
				dev_dbg(&info->pdev->dev, "too small\n");
				return -EINVAL;
			}

			/*
			 * No sanity checks: CPUs must support this,
			 * and the chips may not use NAND_BUSWIDTH_16.
			 */

			/* No sharing 4-bit hardware between chipselects yet */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			if (ret == -EBUSY)
				return ret;

			chip->ecc.calculate = nand_davinci_calculate_4bit;
			chip->ecc.correct = nand_davinci_correct_4bit;
			chip->ecc.hwctl = nand_davinci_hwctl_4bit;
			chip->ecc.bytes = 10;
			chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
			chip->ecc.algo = NAND_ECC_ALGO_BCH;

			/*
			 * Update ECC layout if needed ... for 1-bit HW ECC, the
			 * default is OK, but it allocates 6 bytes when only 3
			 * are needed (for each 512 bytes). For 4-bit HW ECC,
			 * the default is not usable: 10 bytes needed, not 6.
			 *
			 * For small page chips, preserve the manufacturer's
			 * badblock marking data ... and make sure a flash BBT
			 * table marker fits in the free bytes.
			 */
			if (chunks == 1) {
				mtd_set_ooblayout(mtd,
						  &hwecc4_small_ooblayout_ops);
			} else if (chunks == 4 || chunks == 8) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				chip->ecc.read_page = nand_read_page_hwecc_oob_first;
			} else {
				return -EIO;
			}
		} else {
			/* 1bit ecc hamming */
			chip->ecc.calculate = nand_davinci_calculate_1bit;
			chip->ecc.correct = nand_davinci_correct_1bit;
			chip->ecc.hwctl = nand_davinci_hwctl_1bit;
			chip->ecc.bytes = 3;
			chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
		}
		chip->ecc.size = 512;
		chip->ecc.strength = pdata->ecc_bits;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
  570. static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
  571. unsigned int len, bool force_8bit)
  572. {
  573. u32 alignment = ((uintptr_t)buf | len) & 3;
  574. if (force_8bit || (alignment & 1))
  575. ioread8_rep(info->current_cs, buf, len);
  576. else if (alignment & 3)
  577. ioread16_rep(info->current_cs, buf, len >> 1);
  578. else
  579. ioread32_rep(info->current_cs, buf, len >> 2);
  580. }
  581. static void nand_davinci_data_out(struct davinci_nand_info *info,
  582. const void *buf, unsigned int len,
  583. bool force_8bit)
  584. {
  585. u32 alignment = ((uintptr_t)buf | len) & 3;
  586. if (force_8bit || (alignment & 1))
  587. iowrite8_rep(info->current_cs, buf, len);
  588. else if (alignment & 3)
  589. iowrite16_rep(info->current_cs, buf, len >> 1);
  590. else
  591. iowrite32_rep(info->current_cs, buf, len >> 2);
  592. }
/*
 * Execute one NAND operation instruction via direct accesses to the
 * chipselect's data window: CLE/ALE cycles are generated by writing the
 * byte with the corresponding address line (mask_cle/mask_ale) asserted.
 * Returns 0 or -ETIMEDOUT from a failed ready-wait.
 */
static int davinci_nand_exec_instr(struct davinci_nand_info *info,
				   const struct nand_op_instr *instr)
{
	unsigned int i, timeout_us;
	u32 status;
	int ret;

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		/* command byte: write with CLE asserted */
		iowrite8(instr->ctx.cmd.opcode,
			 info->current_cs + info->mask_cle);
		break;
	case NAND_OP_ADDR_INSTR:
		/* address bytes: write each with ALE asserted */
		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
			iowrite8(instr->ctx.addr.addrs[i],
				 info->current_cs + info->mask_ale);
		}
		break;
	case NAND_OP_DATA_IN_INSTR:
		nand_davinci_data_in(info, instr->ctx.data.buf.in,
				     instr->ctx.data.len,
				     instr->ctx.data.force_8bit);
		break;
	case NAND_OP_DATA_OUT_INSTR:
		nand_davinci_data_out(info, instr->ctx.data.buf.out,
				      instr->ctx.data.len,
				      instr->ctx.data.force_8bit);
		break;
	case NAND_OP_WAITRDY_INSTR:
		/* poll the EM_WAIT ready bit (bit 0) in NANDFSR */
		timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
		ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
						 status, status & BIT(0), 100,
						 timeout_us);
		if (ret)
			return ret;

		break;
	}

	if (instr->delay_ns) {
		/* Dummy read to be sure that command is sent before ndelay starts */
		davinci_nand_readl(info, 0);
		ndelay(instr->delay_ns);
	}

	return 0;
}
  636. static int davinci_nand_exec_op(struct nand_chip *chip,
  637. const struct nand_operation *op,
  638. bool check_only)
  639. {
  640. struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
  641. unsigned int i;
  642. if (check_only)
  643. return 0;
  644. info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
  645. for (i = 0; i < op->ninstrs; i++) {
  646. int ret;
  647. ret = davinci_nand_exec_instr(info, &op->instrs[i]);
  648. if (ret)
  649. return ret;
  650. }
  651. return 0;
  652. }
/* Hooks registered with the rawnand core for this controller */
static const struct nand_controller_ops davinci_nand_controller_ops = {
	.attach_chip = davinci_nand_attach_chip,
	.exec_op = davinci_nand_exec_op,
};
/*
 * Probe: map the chip-select data window and the (AEMIF-shared) register
 * range, switch the chipselect into NAND mode, scan for chips and
 * register the MTD device.  All allocations and mappings are devm-managed.
 */
static int nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata *pdata;
	struct davinci_nand_info *info;
	struct resource *res1;
	struct resource *res2;
	void __iomem *vaddr;
	void __iomem *base;
	int ret;
	uint32_t val;
	struct mtd_info *mtd;

	pdata = nand_davinci_get_pdata(pdev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* insist on board-specific configuration */
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdata->core_chipsel > 3)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	/* resource 0: data window; resource 1: controller registers */
	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		return -EINVAL;
	}

	vaddr = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/*
	 * This registers range is used to setup NAND settings. In case with
	 * TI AEMIF driver, the same memory address range is requested already
	 * by AEMIF, so we cannot request it twice, just ioremap.
	 * The AEMIF and NAND drivers not use the same registers in this range.
	 */
	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
		return -EADDRNOTAVAIL;
	}

	info->pdev = pdev;
	info->base = base;
	info->vaddr = vaddr;

	mtd = nand_to_mtd(&info->chip);
	mtd->dev.parent = &pdev->dev;
	nand_set_flash_node(&info->chip, pdev->dev.of_node);

	/* options such as NAND_BBT_USE_FLASH */
	info->chip.bbt_options = pdata->bbt_options;
	/* options such as 16-bit widths */
	info->chip.options = pdata->options;
	info->chip.bbt_td = pdata->bbt_td;
	info->chip.bbt_md = pdata->bbt_md;

	info->current_cs = info->vaddr;
	info->core_chipsel = pdata->core_chipsel;
	info->mask_chipsel = pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale = pdata->mask_ale ? : MASK_ALE;
	info->mask_cle = pdata->mask_cle ? : MASK_CLE;

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
	nand_controller_init(&info->controller);
	info->controller.ops = &davinci_nand_controller_ops;
	info->chip.controller = &info->controller;
	ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		return ret;
	}

	if (pdata->parts)
		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret < 0)
		goto err_cleanup_nand;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
		 (val >> 8) & 0xff, val & 0xff);

	return 0;

err_cleanup_nand:
	nand_cleanup(&info->chip);

	return ret;
}
  748. static void nand_davinci_remove(struct platform_device *pdev)
  749. {
  750. struct davinci_nand_info *info = platform_get_drvdata(pdev);
  751. struct nand_chip *chip = &info->chip;
  752. int ret;
  753. spin_lock_irq(&davinci_nand_lock);
  754. if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
  755. ecc4_busy = false;
  756. spin_unlock_irq(&davinci_nand_lock);
  757. ret = mtd_device_unregister(nand_to_mtd(chip));
  758. WARN_ON(ret);
  759. nand_cleanup(chip);
  760. }
/* Platform driver glue; matched by name or by the OF table above */
static struct platform_driver nand_davinci_driver = {
	.probe		= nand_davinci_probe,
	.remove_new	= nand_davinci_remove,
	.driver		= {
		.name	= "davinci_nand",
		.of_match_table = of_match_ptr(davinci_nand_of_match),
	},
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");