ecc-mxic.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Macronix external hardware ECC engine for NAND devices, also
 * called DPE for Data Processing Engine.
 *
 * Copyright © 2019 Macronix
 * Author: Miquel Raynal <miquel.raynal@bootlin.com>
 */

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* DPE Configuration */
#define DP_CONFIG 0x00
#define ECC_EN BIT(0)
#define ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3))
/* DPE Interrupt Status */
#define INTRPT_STS 0x04
#define TRANS_CMPLT BIT(0)
#define SDMA_MAIN BIT(1)
#define SDMA_SPARE BIT(2)
#define ECC_ERR BIT(3)
#define TO_SPARE BIT(4)
#define TO_MAIN BIT(5)
/* DPE Interrupt Status Enable */
#define INTRPT_STS_EN 0x08
/* DPE Interrupt Signal Enable */
#define INTRPT_SIG_EN 0x0C
/* Host Controller Configuration */
#define HC_CONFIG 0x10
#define DEV2MEM 0 /* TRANS_TYP_DMA in the spec */
#define MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
#define MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */
#define ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
#define ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
#define BURST_TYP_FIXED 0
#define BURST_TYP_INCREASING BIT(0)
/* Host Controller Slave Address */
#define HC_SLV_ADDR 0x14
/* ECC Chunk Size */
#define CHUNK_SIZE 0x20
/* Main Data Size */
#define MAIN_SIZE 0x24
/* Spare Data Size */
#define SPARE_SIZE 0x28
#define META_SZ(reg) ((reg) & GENMASK(7, 0))
#define PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
#define RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
#define SPARE_SZ(reg) ((reg) >> 24)
/* ECC Chunk Count */
#define CHUNK_CNT 0x30
/* SDMA Control */
#define SDMA_CTRL 0x40
#define WRITE_NAND 0
#define READ_NAND BIT(1)
#define CONT_NAND BIT(29)
#define CONT_SYSM BIT(30) /* Continue System Memory? */
#define SDMA_STRT BIT(31)
/* SDMA Address of Main Data */
#define SDMA_MAIN_ADDR 0x44
/* SDMA Address of Spare Data */
#define SDMA_SPARE_ADDR 0x48
/* DPE Version Number */
#define DP_VER 0xD0
#define DP_VER_OFFSET 16

/* Status bytes between each chunk of spare data */
#define STAT_BYTES 4
#define NO_ERR 0x00
#define MAX_CORR_ERR 0x28
#define UNCORR_ERR 0xFE
#define ERASED_CHUNK 0xFF
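
/*
 * Each chunk of spare data read back through the engine is followed by
 * STAT_BYTES of status, the first byte of which encodes the chunk state
 * (as consumed by mxic_ecc_count_biterrs() below): NO_ERR for a clean
 * chunk, 0x01 to MAX_CORR_ERR for the number of corrected bitflips,
 * UNCORR_ERR for an uncorrectable chunk and ERASED_CHUNK for an erased
 * (all 0xff) chunk.
 */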

struct mxic_ecc_engine {
        struct device *dev;
        void __iomem *regs;
        int irq;
        struct completion complete;
        struct nand_ecc_engine external_engine;
        struct nand_ecc_engine pipelined_engine;
        struct mutex lock;
};

struct mxic_ecc_ctx {
        /* ECC machinery */
        unsigned int data_step_sz;
        unsigned int oob_step_sz;
        unsigned int parity_sz;
        unsigned int meta_sz;
        u8 *status;
        int steps;

        /* DMA boilerplate */
        struct nand_ecc_req_tweak_ctx req_ctx;
        u8 *oobwithstat;
        struct scatterlist sg[2];
        struct nand_page_io_req *req;
        unsigned int pageoffs;
};

static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
        return container_of(eng, struct mxic_ecc_engine, external_engine);
}

static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
        return container_of(eng, struct mxic_ecc_engine, pipelined_engine);
}
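
/*
 * A single DPE instance embeds both an external and a pipelined engine
 * description; derive the right container from the integration type
 * declared by the NAND device.
 */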
static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
{
        struct nand_ecc_engine *eng = nand->ecc.engine;

        if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL)
                return ext_ecc_eng_to_mxic(eng);
        else
                return pip_ecc_eng_to_mxic(eng);
}
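
/*
 * Per-step OOB layout exposed to MTD: the free (metadata) bytes come
 * first, immediately followed by the parity bytes. The first two bytes
 * of the page are excluded from the free area as they conventionally
 * hold the bad block marker.
 */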
static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oobregion)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

        if (section < 0 || section >= ctx->steps)
                return -ERANGE;

        oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
        oobregion->length = ctx->parity_sz;

        return 0;
}

static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
                                   struct mtd_oob_region *oobregion)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

        if (section < 0 || section >= ctx->steps)
                return -ERANGE;

        if (!section) {
                oobregion->offset = 2;
                oobregion->length = ctx->meta_sz - 2;
        } else {
                oobregion->offset = section * ctx->oob_step_sz;
                oobregion->length = ctx->meta_sz;
        }

        return 0;
}

static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
        .ecc = mxic_ecc_ooblayout_ecc,
        .free = mxic_ecc_ooblayout_free,
};

static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
{
        u32 reg;

        reg = readl(mxic->regs + DP_CONFIG);
        reg &= ~ECC_EN;
        writel(reg, mxic->regs + DP_CONFIG);
}

static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
{
        u32 reg;

        reg = readl(mxic->regs + DP_CONFIG);
        reg |= ECC_EN;
        writel(reg, mxic->regs + DP_CONFIG);
}

static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
{
        writel(0, mxic->regs + INTRPT_SIG_EN);
}

static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
{
        writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
}

static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
{
        struct mxic_ecc_engine *mxic = dev_id;
        u32 sts;

        sts = readl(mxic->regs + INTRPT_STS);
        if (!sts)
                return IRQ_NONE;

        if (sts & TRANS_CMPLT)
                complete(&mxic->complete);

        writel(sts, mxic->regs + INTRPT_STS);

        return IRQ_HANDLED;
}

static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
{
        struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
        struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
        struct nand_ecc_props *reqs = &nand->ecc.requirements;
        struct nand_ecc_props *user = &nand->ecc.user_conf;
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
        static const int possible_strength[] = {4, 8, 40, 48};
        static const int spare_size[] = {32, 32, 96, 96};
        struct mxic_ecc_ctx *ctx;
        u32 spare_reg;
        int ret;

        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        nand->ecc.ctx.priv = ctx;

        /* Only large page NAND chips may use BCH */
        if (mtd->oobsize < 64) {
                pr_err("BCH cannot be used with small page NAND chips\n");
                return -EINVAL;
        }

        mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);

        /* Enable all status bits */
        writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
               TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);

        /* Configure the correction depending on the NAND device topology */
        if (user->step_size && user->strength) {
                step_size = user->step_size;
                strength = user->strength;
        } else if (reqs->step_size && reqs->strength) {
                step_size = reqs->step_size;
                strength = reqs->strength;
        }

        if (step_size && strength) {
                steps = mtd->writesize / step_size;
                desired_correction = steps * strength;
        }

        /* Step size is fixed to 1kiB, strength may vary (4 possible values) */
        conf->step_size = SZ_1K;
        steps = mtd->writesize / conf->step_size;

        ctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);
        if (!ctx->status)
                return -ENOMEM;

        if (desired_correction) {
                strength = desired_correction / steps;

                for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
                        if (possible_strength[idx] >= strength)
                                break;

                idx = min_t(unsigned int, idx,
                            ARRAY_SIZE(possible_strength) - 1);
        } else {
                /* Missing data, maximize the correction */
                idx = ARRAY_SIZE(possible_strength) - 1;
        }

        /* Tune the selected strength until it fits in the OOB area */
        for (; idx >= 0; idx--) {
                if (spare_size[idx] * steps <= mtd->oobsize)
                        break;
        }
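
        /*
         * Worked example: a 2kiB page with 64 bytes of OOB means two 1kiB
         * steps. 96 bytes of spare per step does not fit (2 * 96 > 64) but
         * 32 bytes does (2 * 32 <= 64), so at most index 1 can be kept,
         * i.e. an 8-bit correction strength.
         */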

        /* This engine cannot be used with this NAND device */
        if (idx < 0)
                return -EINVAL;

        /* Configure the engine for the desired strength */
        writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
        conf->strength = possible_strength[idx];

        spare_reg = readl(mxic->regs + SPARE_SIZE);

        ctx->steps = steps;
        ctx->data_step_sz = mtd->writesize / steps;
        ctx->oob_step_sz = mtd->oobsize / steps;
        ctx->parity_sz = PARITY_SZ(spare_reg);
        ctx->meta_sz = META_SZ(spare_reg);

        /* Ensure buffers will contain enough bytes to store the STAT_BYTES */
        ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
                                       (ctx->steps * STAT_BYTES);
        ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
        if (ret)
                return ret;

        ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
                                   GFP_KERNEL);
        if (!ctx->oobwithstat) {
                ret = -ENOMEM;
                goto cleanup_req_tweak;
        }

        sg_init_table(ctx->sg, 2);

        /* Configuration dump and sanity checks */
        dev_err(dev, "DPE version number: %d\n",
                readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
        dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
        dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
        dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
        dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
        dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
        dev_err(dev, "Meta size: %d\n", ctx->meta_sz);

        if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
            SPARE_SZ(spare_reg)) {
                dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
                        ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
                        SPARE_SZ(spare_reg));
                ret = -EINVAL;
                goto free_oobwithstat;
        }

        if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
                dev_err(dev, "Wrong OOB configuration: %d != %d\n",
                        ctx->oob_step_sz, SPARE_SZ(spare_reg));
                ret = -EINVAL;
                goto free_oobwithstat;
        }

        return 0;

free_oobwithstat:
        kfree(ctx->oobwithstat);
cleanup_req_tweak:
        nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);

        return ret;
}

static int mxic_ecc_init_ctx_external(struct nand_device *nand)
{
        struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
        struct device *dev = nand->ecc.engine->dev;
        int ret;

        dev_info(dev, "Macronix ECC engine in external mode\n");

        ret = mxic_ecc_init_ctx(nand, dev);
        if (ret)
                return ret;

        /* Trigger each step manually */
        writel(1, mxic->regs + CHUNK_CNT);
        writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
               mxic->regs + HC_CONFIG);

        return 0;
}

static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand)
{
        struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
        struct mxic_ecc_ctx *ctx;
        struct device *dev;
        int ret;

        dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev);
        if (!dev)
                return -EINVAL;

        dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n");

        ret = mxic_ecc_init_ctx(nand, dev);
        if (ret)
                return ret;

        ctx = nand_to_ecc_ctx(nand);

        /* All steps should be handled in one go directly by the internal DMA */
        writel(ctx->steps, mxic->regs + CHUNK_CNT);

        /*
         * Interleaved ECC scheme cannot be used otherwise factory bad block
         * markers would be lost. A packed layout is mandatory.
         */
        writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING,
               mxic->regs + HC_CONFIG);

        return 0;
}

static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
{
        struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

        if (ctx) {
                nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
                kfree(ctx->oobwithstat);
        }
}

static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
{
        u32 val;
        int ret;

        if (mxic->irq) {
                reinit_completion(&mxic->complete);
                mxic_ecc_enable_int(mxic);
                ret = wait_for_completion_timeout(&mxic->complete,
                                                  msecs_to_jiffies(1000));
                ret = ret ? 0 : -ETIMEDOUT;
                mxic_ecc_disable_int(mxic);
        } else {
                ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
                                         val & TRANS_CMPLT, 10, USEC_PER_SEC);
                writel(val, mxic->regs + INTRPT_STS);
        }

        if (ret) {
                dev_err(mxic->dev, "Timeout on data xfer completion\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
                                 unsigned int direction)
{
        unsigned int dir = (direction == NAND_PAGE_READ) ?
                           READ_NAND : WRITE_NAND;
        int ret;

        mxic_ecc_enable_engine(mxic);

        /* Trigger processing */
        writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);

        /* Wait for completion */
        ret = mxic_ecc_data_xfer_wait_for_completion(mxic);

        mxic_ecc_disable_engine(mxic);

        return ret;
}

int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
                                    unsigned int direction, dma_addr_t dirmap)
{
        struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);

        if (dirmap)
                writel(dirmap, mxic->regs + HC_SLV_ADDR);

        return mxic_ecc_process_data(mxic, direction);
}
EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined);
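
/*
 * Illustrative only: a pipelined host controller driver is expected to
 * point the engine at its direct-mapping window and kick it around the
 * actual I/O. A minimal sketch, where dirmap_dma is a hypothetical DMA
 * address of the host's direct-mapping region:
 *
 *      ret = mxic_ecc_process_data_pipelined(nand->ecc.engine,
 *                                            NAND_PAGE_READ, dirmap_dma);
 */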

static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
{
        u8 *buf = ctx->oobwithstat;
        int next_stat_pos;
        int step;

        /* Extract the ECC status */
        for (step = 0; step < ctx->steps; step++) {
                next_stat_pos = ctx->oob_step_sz +
                                ((STAT_BYTES + ctx->oob_step_sz) * step);

                ctx->status[step] = buf[next_stat_pos];
        }
}
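
/*
 * For reference, the ctx->oobwithstat layout walked by the helper above
 * and the two below, shown here for a two-step device:
 *
 *      | spare 0 | stat 0 (4B) | spare 1 | stat 1 (4B) |
 */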

static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
                                        u8 *dst, const u8 *src)
{
        int step;

        /* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
        for (step = 0; step < ctx->steps; step++)
                memcpy(dst + (step * ctx->oob_step_sz),
                       src + (step * (ctx->oob_step_sz + STAT_BYTES)),
                       ctx->oob_step_sz);
}

static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
                                        u8 *dst, const u8 *src)
{
        int step;

        /* Add some space in the OOB buffer for the status bytes */
        for (step = 0; step < ctx->steps; step++)
                memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
                       src + (step * ctx->oob_step_sz),
                       ctx->oob_step_sz);
}

static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
                                  struct nand_device *nand)
{
        struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        struct device *dev = mxic->dev;
        unsigned int max_bf = 0;
        bool failure = false;
        int step;

        for (step = 0; step < ctx->steps; step++) {
                u8 stat = ctx->status[step];

                if (stat == NO_ERR) {
                        dev_dbg(dev, "ECC step %d: no error\n", step);
                } else if (stat == ERASED_CHUNK) {
                        dev_dbg(dev, "ECC step %d: erased\n", step);
                } else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
                        dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
                        mtd->ecc_stats.failed++;
                        failure = true;
                } else {
                        dev_dbg(dev, "ECC step %d: %d bits corrected\n",
                                step, stat);
                        max_bf = max_t(unsigned int, max_bf, stat);
                        mtd->ecc_stats.corrected += stat;
                }
        }

        return failure ? -EBADMSG : max_bf;
}

/* External ECC engine helpers */
static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
                                            struct nand_page_io_req *req)
{
        struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
        struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        int offset, nents, step, ret;

        if (req->mode == MTD_OPS_RAW)
                return 0;

        nand_ecc_tweak_req(&ctx->req_ctx, req);
        ctx->req = req;

        if (req->type == NAND_PAGE_READ)
                return 0;

        mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
                                    ctx->req->oobbuf.out);

        sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
        sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
                   req->ooblen + (ctx->steps * STAT_BYTES));

        nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
        if (!nents)
                return -EINVAL;

        mutex_lock(&mxic->lock);

        for (step = 0; step < ctx->steps; step++) {
                writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
                       mxic->regs + SDMA_MAIN_ADDR);
                writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
                       mxic->regs + SDMA_SPARE_ADDR);
                ret = mxic_ecc_process_data(mxic, ctx->req->type);
                if (ret)
                        break;
        }

        mutex_unlock(&mxic->lock);

        dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

        if (ret)
                return ret;

        /* Retrieve the calculated ECC bytes */
        for (step = 0; step < ctx->steps; step++) {
                offset = ctx->meta_sz + (step * ctx->oob_step_sz);
                mtd_ooblayout_get_eccbytes(mtd,
                                           (u8 *)ctx->req->oobbuf.out + offset,
                                           ctx->oobwithstat + (step * STAT_BYTES),
                                           step * ctx->parity_sz,
                                           ctx->parity_sz);
        }

        return 0;
}

static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
                                           struct nand_page_io_req *req)
{
        struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
        struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
        int nents, step, ret = 0;

        if (req->mode == MTD_OPS_RAW)
                return 0;

        if (req->type == NAND_PAGE_WRITE) {
                nand_ecc_restore_req(&ctx->req_ctx, req);
                return 0;
        }

        /* Copy the OOB buffer and add room for the ECC engine status bytes */
        mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);

        sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
        sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
                   req->ooblen + (ctx->steps * STAT_BYTES));
        nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
        if (!nents)
                return -EINVAL;

        mutex_lock(&mxic->lock);

        for (step = 0; step < ctx->steps; step++) {
                writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
                       mxic->regs + SDMA_MAIN_ADDR);
                writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
                       mxic->regs + SDMA_SPARE_ADDR);
                ret = mxic_ecc_process_data(mxic, ctx->req->type);
                if (ret)
                        break;
        }

        mutex_unlock(&mxic->lock);

        dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

        if (ret) {
                nand_ecc_restore_req(&ctx->req_ctx, req);
                return ret;
        }

        /* Extract the status bytes and reconstruct the buffer */
        mxic_ecc_extract_status_bytes(ctx);
        mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);

        nand_ecc_restore_req(&ctx->req_ctx, req);

        return mxic_ecc_count_biterrs(mxic, nand);
}

/* Pipelined ECC engine helpers */
static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand,
                                             struct nand_page_io_req *req)
{
        struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
        struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
        int nents;

        if (req->mode == MTD_OPS_RAW)
                return 0;

        nand_ecc_tweak_req(&ctx->req_ctx, req);
        ctx->req = req;

        /* Copy the OOB buffer and add room for the ECC engine status bytes */
        mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);

        sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
        sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
                   req->ooblen + (ctx->steps * STAT_BYTES));

        nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
        if (!nents)
                return -EINVAL;
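
        /*
         * The lock is taken here on purpose and only released in
         * ->finish_io_req(): the host controller performs the actual I/O
         * between the two hooks, during which the engine and the DMA
         * addresses programmed below must stay reserved for this request.
         */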
        mutex_lock(&mxic->lock);

        writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR);
        writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR);

        return 0;
}

static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
                                            struct nand_page_io_req *req)
{
        struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
        struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
        int ret = 0;

        if (req->mode == MTD_OPS_RAW)
                return 0;

        mutex_unlock(&mxic->lock);

        dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

        if (req->type == NAND_PAGE_READ) {
                mxic_ecc_extract_status_bytes(ctx);
                mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in,
                                            ctx->oobwithstat);
                ret = mxic_ecc_count_biterrs(mxic, nand);
        }

        nand_ecc_restore_req(&ctx->req_ctx, req);

        return ret;
}

static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
        .init_ctx = mxic_ecc_init_ctx_external,
        .cleanup_ctx = mxic_ecc_cleanup_ctx,
        .prepare_io_req = mxic_ecc_prepare_io_req_external,
        .finish_io_req = mxic_ecc_finish_io_req_external,
};

static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
        .init_ctx = mxic_ecc_init_ctx_pipelined,
        .cleanup_ctx = mxic_ecc_cleanup_ctx,
        .prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
        .finish_io_req = mxic_ecc_finish_io_req_pipelined,
};

struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
{
        return &mxic_ecc_engine_pipelined_ops;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops);

static struct platform_device *
mxic_ecc_get_pdev(struct platform_device *spi_pdev)
{
        struct platform_device *eng_pdev;
        struct device_node *np;

        /* Retrieve the nand-ecc-engine phandle */
        np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0);
        if (!np)
                return NULL;

        /* Jump to the engine's device node */
        eng_pdev = of_find_device_by_node(np);
        of_node_put(np);

        return eng_pdev;
}

void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng)
{
        struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);

        platform_device_put(to_platform_device(mxic->dev));
}
EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine);

struct nand_ecc_engine *
mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
{
        struct platform_device *eng_pdev;
        struct mxic_ecc_engine *mxic;

        eng_pdev = mxic_ecc_get_pdev(spi_pdev);
        if (!eng_pdev)
                return ERR_PTR(-ENODEV);

        mxic = platform_get_drvdata(eng_pdev);
        if (!mxic) {
                platform_device_put(eng_pdev);
                return ERR_PTR(-EPROBE_DEFER);
        }

        return &mxic->pipelined_engine;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine);
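
/*
 * Sketch of the expected calling sequence from a host controller driver
 * wrapping the pipelined engine (error handling trimmed, names on the
 * host side are illustrative):
 *
 *      eng = mxic_ecc_get_pipelined_engine(pdev);
 *      if (IS_ERR(eng))
 *              return PTR_ERR(eng);
 *      eng->ops = mxic_ecc_get_pipelined_ops();
 *      nand->ecc.engine = eng;
 *      ...
 *      mxic_ecc_put_pipelined_engine(eng);     // on remove or error
 */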

/*
 * Only the external ECC engine is exported; the pipelined one is SoC
 * specific, so it is registered directly by the drivers that wrap it.
 */
static int mxic_ecc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct mxic_ecc_engine *mxic;
        int ret;

        mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
        if (!mxic)
                return -ENOMEM;

        mxic->dev = &pdev->dev;

        /*
         * Both memory regions for the ECC engine itself and the AXI slave
         * address are mandatory.
         */
        mxic->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(mxic->regs)) {
                dev_err(&pdev->dev, "Missing memory region\n");
                return PTR_ERR(mxic->regs);
        }

        mxic_ecc_disable_engine(mxic);
        mxic_ecc_disable_int(mxic);

        /* IRQ is optional yet much more efficient */
        mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
        if (mxic->irq > 0) {
                ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
                                       "mxic-ecc", mxic);
                if (ret)
                        return ret;
        } else {
                dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
                mxic->irq = 0;
        }

        mutex_init(&mxic->lock);

        /*
         * In external mode, the device is the ECC engine. In pipelined mode,
         * the device is the host controller. The device is used to match the
         * right ECC engine based on the DT properties.
         */
        mxic->external_engine.dev = &pdev->dev;
        mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
        mxic->external_engine.ops = &mxic_ecc_engine_external_ops;

        nand_ecc_register_on_host_hw_engine(&mxic->external_engine);

        platform_set_drvdata(pdev, mxic);

        return 0;
}

static void mxic_ecc_remove(struct platform_device *pdev)
{
        struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);

        nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);
}

static const struct of_device_id mxic_ecc_of_ids[] = {
        {
                .compatible = "mxicy,nand-ecc-engine-rev3",
        },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);

static struct platform_driver mxic_ecc_driver = {
        .driver = {
                .name = "mxic-nand-ecc-engine",
                .of_match_table = mxic_ecc_of_ids,
        },
        .probe = mxic_ecc_probe,
        .remove_new = mxic_ecc_remove,
};
module_platform_driver(mxic_ecc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");