// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>

/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	if (mtd_check_expert_analysis_mode())
		return false;

	if (nanddev_bbt_is_initialized(nand)) {
		unsigned int entry;
		int status;

		entry = nanddev_bbt_pos_to_entry(nand, pos);
		status = nanddev_bbt_get_block_status(nand, entry);

		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (nand->ops->isbad(nand, pos))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			nanddev_bbt_set_block_status(nand, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);
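
/*
 * Illustrative sketch (not part of the original file): a caller can walk a
 * device eraseblock by eraseblock and query nanddev_isbad() on each one; the
 * BBT entries are then populated lazily on first access. All helpers used
 * below exist in <linux/mtd/nand.h>; only example_count_bad_blocks() itself
 * is a made-up name.
 */
static __maybe_unused unsigned int example_count_bad_blocks(struct nand_device *nand)
{
	struct nand_pos pos, last;
	unsigned int bad = 0;

	nanddev_offs_to_pos(nand, 0, &pos);
	nanddev_offs_to_pos(nand, nanddev_size(nand) - 1, &last);

	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		if (nanddev_isbad(nand, &pos))
			bad++;

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return bad;
}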

/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Mark a block bad. This function updates the BBT if available and calls the
 * low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	if (nanddev_isbad(nand, pos))
		return 0;

	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
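
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * retires a block by calling nanddev_markbad() once an operation on it has
 * failed. example_retire_on_erase_failure() is a hypothetical helper; for
 * brevity the erase is issued directly through the low-level hook.
 */
static __maybe_unused int example_retire_on_erase_failure(struct nand_device *nand,
							   const struct nand_pos *pos)
{
	int ret;

	ret = nand->ops->erase(nand, pos);
	if (!ret)
		return 0;

	pr_warn("erase failed @%llx, marking the block bad\n",
		nanddev_pos_to_offs(nand, pos));

	return nanddev_markbad(nand, pos);
}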

/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed to by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
	unsigned int entry;
	int status;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	/* Return info from the table */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	status = nanddev_bbt_get_block_status(nand, entry);
	return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);

/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block if it's not bad.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
		pr_warn("attempt to erase a bad/reserved block @%llx\n",
			nanddev_pos_to_offs(nand, pos));
		return -EIO;
	}

	return nand->ops->erase(nand, pos);
}

/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * concerned by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, last;
	int ret;

	nanddev_offs_to_pos(nand, einfo->addr, &pos);
	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		ret = nanddev_erase(nand, &pos);
		if (ret) {
			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

			return ret;
		}

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
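
/*
 * Illustrative sketch (not part of the original file): as the kerneldoc above
 * recommends, a specialized layer should wrap nanddev_mtd_erase() with its
 * own lock instead of assigning it to mtd->_erase directly. struct
 * example_nand and its embedded lock are hypothetical; a mutex (from
 * <linux/mutex.h>) is assumed to serialize accesses to the controller.
 */
struct example_nand {
	struct nand_device base;
	struct mutex lock;	/* serializes accesses to the chip */
};

static __maybe_unused int example_mtd_erase(struct mtd_info *mtd,
					    struct erase_info *einfo)
{
	struct example_nand *enand = container_of(mtd_to_nanddev(mtd),
						  struct example_nand, base);
	int ret;

	mutex_lock(&enand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&enand->lock);

	return ret;
}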

/**
 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks on
 *				  a specific region of the NAND device
 * @mtd: MTD device
 * @offs: offset of the NAND region
 * @len: length of the NAND region
 *
 * Default implementation for mtd->_max_bad_blocks(). Only works if
 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
 *
 * Return: a positive number encoding the maximum number of bad eraseblocks
 * on a portion of memory, a negative error code otherwise.
 */
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, end;
	unsigned int max_bb = 0;

	if (!nand->memorg.max_bad_eraseblocks_per_lun)
		return -ENOTSUPP;

	nanddev_offs_to_pos(nand, offs, &pos);
	nanddev_offs_to_pos(nand, offs + len, &end);

	for (nanddev_offs_to_pos(nand, offs, &pos);
	     nanddev_pos_cmp(&pos, &end) < 0;
	     nanddev_pos_next_lun(nand, &pos))
		max_bb += nand->memorg.max_bad_eraseblocks_per_lun;

	return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);

/**
 * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
 * @nand: NAND device
 */
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
	int engine_type;

	/* Read the user desires in terms of ECC engine/configuration */
	of_get_nand_ecc_user_config(nand);

	engine_type = nand->ecc.user_conf.engine_type;
	if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		engine_type = nand->ecc.defaults.engine_type;

	switch (engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		return 0;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		nand->ecc.engine = nand_ecc_get_sw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand);
		if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		break;
	default:
		pr_err("Missing ECC engine type\n");
	}

	if (!nand->ecc.engine)
		return -EINVAL;

	return 0;
}

/**
 * nanddev_put_ecc_engine() - Detach and put the in-use ECC engine
 * @nand: NAND device
 */
static int nanddev_put_ecc_engine(struct nand_device *nand)
{
	switch (nand->ecc.ctx.conf.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		nand_ecc_put_on_host_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
	default:
		break;
	}

	return 0;
}

/**
 * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
 * @nand: NAND device
 */
static int nanddev_find_ecc_configuration(struct nand_device *nand)
{
	int ret;

	if (!nand->ecc.engine)
		return -ENOTSUPP;

	ret = nand_ecc_init_ctx(nand);
	if (ret)
		return ret;

	if (!nand_ecc_is_strong_enough(nand))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			nand->mtd.name);

	return 0;
}

/**
 * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
 * @nand: NAND device
 */
int nanddev_ecc_engine_init(struct nand_device *nand)
{
	int ret;

	/* Look for the ECC engine to use */
	ret = nanddev_get_ecc_engine(nand);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			pr_err("No ECC engine found\n");

		return ret;
	}

	/* No ECC engine requested */
	if (!nand->ecc.engine)
		return 0;

	/* Configure the engine: balance user input and chip requirements */
	ret = nanddev_find_ecc_configuration(nand);
	if (ret) {
		pr_err("No suitable ECC configuration\n");
		nanddev_put_ecc_engine(nand);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);

/**
 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
 * @nand: NAND device
 */
void nanddev_ecc_engine_cleanup(struct nand_device *nand)
{
	if (nand->ecc.engine)
		nand_ecc_cleanup_ctx(nand);

	nanddev_put_ecc_engine(nand);
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);

/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner)
{
	struct mtd_info *mtd;
	struct nand_memory_organization *memorg;

	if (!nand || !ops)
		return -EINVAL;

	if (!ops->erase || !ops->markbad || !ops->isbad)
		return -EINVAL;

	mtd = nanddev_to_mtd(nand);
	memorg = nanddev_get_memorg(nand);

	if (!memorg->bits_per_cell || !memorg->pagesize ||
	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
	    !memorg->planes_per_lun || !memorg->luns_per_target ||
	    !memorg->ntargets)
		return -EINVAL;
	nand->rowconv.eraseblock_addr_shift =
		fls(memorg->pages_per_eraseblock - 1);
	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
				       nand->rowconv.eraseblock_addr_shift;

	nand->ops = ops;

	mtd->type = memorg->bits_per_cell == 1 ?
		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
	mtd->writesize = memorg->pagesize;
	mtd->writebufsize = memorg->pagesize;
	mtd->oobsize = memorg->oobsize;
	mtd->size = nanddev_size(nand);
	mtd->owner = owner;

	return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);
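
/*
 * Illustrative sketch (not part of the original file): probe-time sequence
 * for a hypothetical driver built on this framework. The memorg values, the
 * example_op_* hooks and example_probe() are all made up; a real driver
 * fills memorg from the chip identification data and implements the hooks
 * on top of its controller.
 */
static int example_op_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	return 0;	/* issue the actual block erase here */
}

static int example_op_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	return 0;	/* write the bad block marker here */
}

static bool example_op_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	return false;	/* read the bad block marker here */
}

static const struct nand_ops example_nand_ops = {
	.erase = example_op_erase,
	.markbad = example_op_markbad,
	.isbad = example_op_isbad,
};

static __maybe_unused int example_probe(struct nand_device *nand)
{
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
	int ret;

	/* Describe a small SLC chip: 2k pages, 64 pages per eraseblock */
	memorg->bits_per_cell = 1;
	memorg->pagesize = 2048;
	memorg->oobsize = 64;
	memorg->pages_per_eraseblock = 64;
	memorg->eraseblocks_per_lun = 1024;
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;
	memorg->ntargets = 1;

	ret = nanddev_init(nand, &example_nand_ops, THIS_MODULE);
	if (ret)
		return ret;

	/* Optionally attach an ECC engine described in the device tree */
	ret = nanddev_ecc_engine_init(nand);
	if (ret)
		goto err_cleanup_nand;

	ret = mtd_device_register(nanddev_to_mtd(nand), NULL, 0);
	if (ret)
		goto err_cleanup_ecc;

	return 0;

err_cleanup_ecc:
	nanddev_ecc_engine_cleanup(nand);
err_cleanup_nand:
	nanddev_cleanup(nand);

	return ret;
}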

/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");