nand_hynix.c

/*
 * Copyright (C) 2017 Free Electrons
 * Copyright (C) 2017 NextThing Co
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#define NAND_HYNIX_CMD_SET_PARAMS	0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS	0x16

#define NAND_HYNIX_1XNM_RR_REPEAT	8

/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to set when applying a new read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: array of values to set in registers. The array size is equal to
 *	    (nregs * nmodes)
 */
struct hynix_read_retry {
	int nregs;
	const u8 *regs;
	u8 values[0];
};
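
/*
 * The values[] array is laid out mode-major: the register values for
 * read-retry mode 'm' live in values[m * nregs] through
 * values[m * nregs + nregs - 1], which is exactly the slice picked by
 * hynix_nand_setup_read_retry() below.
 */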

/**
 * struct hynix_nand - private Hynix NAND struct
 * @read_retry: read-retry information
 */
struct hynix_nand {
	const struct hynix_read_retry *read_retry;
};

/**
 * struct hynix_read_retry_otp - structure describing how to access the
 *				 read-retry OTP area
 * @nregs: number of hynix private registers to set before reading the OTP
 *	   area
 * @regs: registers that should be configured
 * @values: values that should be set in regs
 * @page: the address to pass to the READ_PAGE command. Depends on the NAND
 *	  chip
 * @size: size of the read-retry OTP section
 */
struct hynix_read_retry_otp {
	int nregs;
	const u8 *regs;
	const u8 *values;
	int page;
	int size;
};

static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
	u8 jedecid[5] = { };
	int ret;

	ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
	if (ret)
		return false;

	return !strncmp("JEDEC", jedecid, sizeof(jedecid));
}
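
/*
 * The command helpers below support both NAND core interfaces: when
 * ->exec_op is available they describe the access as a nand_operation,
 * otherwise they fall back to the legacy ->cmdfunc()/->write_byte() hooks.
 */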

static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(cmd, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, cmd, -1, -1);

	return 0;
}

static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u16 column = ((u16)addr << 8) | addr;

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_ADDR(1, &addr, 0),
			NAND_OP_8BIT_DATA_OUT(1, &val, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
	chip->write_byte(mtd, val);

	return 0;
}

static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	const u8 *values;
	int i, ret;

	values = hynix->read_retry->values +
		 (retry_mode * hynix->read_retry->nregs);

	/* Enter 'Set Hynix Parameters' mode */
	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	/*
	 * Configure the NAND in the requested read-retry mode.
	 * This is done by setting pre-defined values in internal NAND
	 * registers.
	 *
	 * The set of registers is NAND specific, and the values are either
	 * predefined or extracted from an OTP area on the NAND (values are
	 * probably tweaked at production in this case).
	 */
	for (i = 0; i < hynix->read_retry->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
					      values[i]);
		if (ret)
			return ret;
	}

	/* Apply the new settings. */
	return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
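
/*
 * The resulting bus sequence for, say, retry mode 2 on a chip with 8
 * read-retry registers is: CMD 0x36, then one address + data cycle per
 * register (regs[i], values[2 * 8 + i]), and finally CMD 0x16 to apply the
 * new settings. The NAND core is expected to call this hook with retry_mode
 * ranging from 0 to chip->read_retries - 1 while ECC errors persist.
 */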

/**
 * hynix_get_majority - get the value that is occurring the most in a given
 *			set of values
 * @in: the array of values to test
 * @repeat: the size of the in array
 * @out: pointer used to store the output value
 *
 * This function implements the 'majority check' logic that is supposed to
 * overcome the unreliability of MLC NANDs when reading the OTP area storing
 * the read-retry parameters.
 *
 * It's based on a pretty simple assumption: if we repeat the same value
 * several times and then take the one that is occurring the most, we should
 * find the correct value.
 * Let's hope this dummy algorithm prevents us from losing the read-retry
 * parameters.
 */
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
	int i, j, half = repeat / 2;

	/*
	 * We only test the first half of the in array because we must ensure
	 * that the value is at least occurring repeat / 2 times.
	 *
	 * This loop is suboptimal since we may count the occurrences of the
	 * same value several times, but we are doing that on small sets,
	 * which makes it acceptable.
	 */
	for (i = 0; i < half; i++) {
		int cnt = 0;
		u8 val = in[i];

		/* Count all values that are matching the one at index i. */
		for (j = i + 1; j < repeat; j++) {
			if (in[j] == val)
				cnt++;
		}

		/* We found a value occurring more than repeat / 2 times. */
		if (cnt > half) {
			*out = val;
			return 0;
		}
	}

	return -EIO;
}
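
/*
 * Example: with repeat = 8 and in = { 0x3c, 0x3c, 0xff, 0x3c, 0x3c, 0x00,
 * 0x3c, 0x3c }, the value 0x3c occurs 6 times, which is more than
 * repeat / 2, so *out is set to 0x3c and 0 is returned. If no value clears
 * that threshold, -EIO is returned and the caller can fall back to the
 * inverted copy of the data (see hynix_mlc_1xnm_rr_value()).
 */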

static int hynix_read_rr_otp(struct nand_chip *chip,
			     const struct hynix_read_retry_otp *info,
			     void *buf)
{
	int i, ret;

	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	for (i = 0; i < info->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, info->regs[i],
					      info->values[i]);
		if (ret)
			return ret;
	}

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	/* Sequence to enter OTP mode? */
	ret = hynix_nand_cmd_op(chip, 0x17);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x4);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x19);
	if (ret)
		return ret;

	/* Now read the page */
	ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
	if (ret)
		return ret;

	/* Put everything back to normal */
	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	ret = hynix_nand_reg_write_op(chip, 0x38, 0);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	return nand_read_page_op(chip, 0, 0, NULL, 0);
}

#define NAND_HYNIX_1XNM_RR_COUNT_OFFS			0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS		8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv)	\
	(16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))
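
/*
 * Layout of the read-retry OTP area, as reconstructed from the accessors
 * below: bytes 0-7 repeat the number of read-retry modes, bytes 8-15 repeat
 * the number of registers per mode, and the remainder holds
 * NAND_HYNIX_1XNM_RR_REPEAT copies of the value table, each copy stored once
 * as-is and once bit-inverted, with values ordered mode-major
 * ((mode * nregs) + reg).
 */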

static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
				   int mode, int reg, bool inv, u8 *val)
{
	u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
	int val_offs = (mode * nregs) + reg;
	int set_size = nmodes * nregs;
	int i, ret;

	for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
		int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

		tmp[i] = buf[val_offs + set_offs];
	}

	ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
	if (ret)
		return ret;

	if (inv)
		*val = ~*val;

	return 0;
}

static u8 hynix_1xnm_mlc_read_retry_regs[] = {
	0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};

static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
				  const struct hynix_read_retry_otp *info)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	struct hynix_read_retry *rr = NULL;
	int ret, i, j;
	u8 nregs, nmodes;
	u8 *buf;

	buf = kmalloc(info->size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hynix_read_rr_otp(chip, info, buf);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
				 &nmodes);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
				 NAND_HYNIX_1XNM_RR_REPEAT,
				 &nregs);
	if (ret)
		goto out;

	rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
	if (!rr) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nmodes; i++) {
		for (j = 0; j < nregs; j++) {
			/* Each (mode, reg) pair has its own slot. */
			u8 *val = rr->values + (i * nregs) + j;

			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      false, val);
			if (!ret)
				continue;

			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      true, val);
			if (ret)
				goto out;
		}
	}

	rr->nregs = nregs;
	rr->regs = hynix_1xnm_mlc_read_retry_regs;
	hynix->read_retry = rr;
	chip->setup_read_retry = hynix_nand_setup_read_retry;
	chip->read_retries = nmodes;

out:
	kfree(buf);

	if (ret)
		kfree(rr);

	return ret;
}

static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x21f,
		.size = 784
	},
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x200,
		.size = 528,
	},
};
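
/*
 * Both descriptors use the same unlock register (0x38 = 0x52) but differ in
 * the OTP page address and size; hynix_nand_rr_init() below simply tries
 * them in order until one yields a consistent read-retry table.
 */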

static int hynix_nand_rr_init(struct nand_chip *chip)
{
	int i, ret = 0;
	bool valid_jedecid;

	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	/*
	 * We only support read-retry for 1xnm NANDs, and those NANDs all
	 * expose a valid JEDEC ID.
	 */
	if (valid_jedecid) {
		u8 nand_tech = chip->id.data[5] >> 4;

		/* 1xnm technology */
		if (nand_tech == 4) {
			for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
			     i++) {
				/*
				 * FIXME: Hynix recommend to copy the
				 * read-retry OTP area into a normal page.
				 */
				ret = hynix_mlc_1xnm_rr_init(chip,
						&hynix_mlc_1xnm_rr_otps[i]);
				if (!ret)
					break;
			}
		}
	}

	if (ret)
		pr_warn("failed to initialize read-retry infrastructure\n");

	return 0;
}

static void hynix_nand_extract_oobsize(struct nand_chip *chip,
				       bool valid_jedecid)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 oobsize;
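
	/*
	 * The 3-bit OOB size code is scattered over ID byte 3: bits 3:2
	 * provide the two low bits of the code and bit 6 provides its top
	 * bit, which is what the expression below reassembles.
	 */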
	oobsize = ((chip->id.data[3] >> 2) & 0x3) |
		  ((chip->id.data[3] >> 4) & 0x4);

	if (valid_jedecid) {
		switch (oobsize) {
		case 0:
			mtd->oobsize = 2048;
			break;
		case 1:
			mtd->oobsize = 1664;
			break;
		case 2:
			mtd->oobsize = 1024;
			break;
		case 3:
			mtd->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}
	} else {
		switch (oobsize) {
		case 0:
			mtd->oobsize = 128;
			break;
		case 1:
			mtd->oobsize = 224;
			break;
		case 2:
			mtd->oobsize = 448;
			break;
		case 3:
			mtd->oobsize = 64;
			break;
		case 4:
			mtd->oobsize = 32;
			break;
		case 5:
			mtd->oobsize = 16;
			break;
		case 6:
			mtd->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}

		/*
		 * The datasheet of H27UCG8T2BTR mentions that the "Redundant
		 * Area Size" is encoded "per 8KB" (page size). This chip uses
		 * a page size of 16KiB. The datasheet mentions an OOB size of
		 * 1,280 bytes, but the OOB size encoded in the ID bytes
		 * (using the existing logic above) is 640 bytes.
		 * Update the OOB size for this chip by taking the value
		 * determined above and scaling it to the actual page size (so
		 * the actual OOB size for this chip is: 640 * 16k / 8k).
		 */
		if (chip->id.data[1] == 0xde)
			mtd->oobsize *= mtd->writesize / SZ_8K;
	}
}

static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
						bool valid_jedecid)
{
	u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

	if (valid_jedecid) {
		/* Reference: H27UCG8T2E datasheet */
		chip->ecc_step_ds = 1024;

		switch (ecc_level) {
		case 0:
			chip->ecc_step_ds = 0;
			chip->ecc_strength_ds = 0;
			break;
		case 1:
			chip->ecc_strength_ds = 4;
			break;
		case 2:
			chip->ecc_strength_ds = 24;
			break;
		case 3:
			chip->ecc_strength_ds = 32;
			break;
		case 4:
			chip->ecc_strength_ds = 40;
			break;
		case 5:
			chip->ecc_strength_ds = 50;
			break;
		case 6:
			chip->ecc_strength_ds = 60;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid ECC requirements");
		}
	} else {
		/*
		 * The ECC requirements field meaning depends on the
		 * NAND technology.
		 */
		u8 nand_tech = chip->id.data[5] & 0x7;

		if (nand_tech < 3) {
			/* > 26nm, reference: H27UBG8T2A datasheet */
			if (ecc_level < 5) {
				chip->ecc_step_ds = 512;
				chip->ecc_strength_ds = 1 << ecc_level;
			} else if (ecc_level < 7) {
				if (ecc_level == 5)
					chip->ecc_step_ds = 2048;
				else
					chip->ecc_step_ds = 1024;
				chip->ecc_strength_ds = 24;
			} else {
				/*
				 * We should never reach this case, but if
				 * that happens, this probably means Hynix
				 * decided to use a different extended ID
				 * format, and we should find a way to
				 * support it.
				 */
				WARN(1, "Invalid ECC requirements");
			}
		} else {
			/* <= 26nm, reference: H27UBG8T2B datasheet */
			if (!ecc_level) {
				chip->ecc_step_ds = 0;
				chip->ecc_strength_ds = 0;
			} else if (ecc_level < 5) {
				chip->ecc_step_ds = 512;
				chip->ecc_strength_ds = 1 << (ecc_level - 1);
			} else {
				chip->ecc_step_ds = 1024;
				chip->ecc_strength_ds = 24 +
							(8 * (ecc_level - 5));
			}
		}
	}
}

static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
							bool valid_jedecid)
{
	u8 nand_tech;

	/* We need scrambling on all TLC NANDs */
	if (chip->bits_per_cell > 2)
		chip->options |= NAND_NEED_SCRAMBLING;

	/* And on MLC NANDs with sub-3xnm process */
	if (valid_jedecid) {
		nand_tech = chip->id.data[5] >> 4;

		/* < 3xnm */
		if (nand_tech > 0)
			chip->options |= NAND_NEED_SCRAMBLING;
	} else {
		nand_tech = chip->id.data[5] & 0x7;

		/* < 32nm */
		if (nand_tech > 2)
			chip->options |= NAND_NEED_SCRAMBLING;
	}
}

static void hynix_nand_decode_id(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool valid_jedecid;
	u8 tmp;

	/*
	 * Exclude all SLC NANDs from this advanced detection scheme.
	 * According to the ranges defined in several datasheets, it might
	 * appear that even SLC NANDs could fall in this extended ID scheme.
	 * If that is the case, rework the test to let SLC NANDs go through
	 * the detection process.
	 */
	if (chip->id.len < 6 || nand_is_slc(chip)) {
		nand_decode_ext_id(chip);
		return;
	}

	/* Extract pagesize */
	mtd->writesize = 2048 << (chip->id.data[3] & 0x03);

	tmp = (chip->id.data[3] >> 4) & 0x3;
	/*
	 * When bit7 is set that means we start counting at 1MiB, otherwise
	 * we start counting at 128KiB and shift this value by the content of
	 * ID[3][4:5].
	 * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
	 * this case the erasesize is set to 768KiB.
	 */
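	/*
	 * For example, ID[3][7] = 0 and ID[3][4:5] = 2 gives an erasesize of
	 * 128KiB << 2 = 512KiB, while ID[3][7] = 1 and ID[3][4:5] = 1 gives
	 * 1MiB << 1 = 2MiB.
	 */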
	if (chip->id.data[3] & 0x80)
		mtd->erasesize = SZ_1M << tmp;
	else if (tmp == 3)
		mtd->erasesize = SZ_512K + SZ_256K;
	else
		mtd->erasesize = SZ_128K << tmp;

	/*
	 * Modern Toggle DDR NANDs have a valid JEDECID even though they are
	 * not exposing a valid JEDEC parameter table.
	 * These NANDs use a different NAND ID scheme.
	 */
	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	hynix_nand_extract_oobsize(chip, valid_jedecid);
	hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
	hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}

static void hynix_nand_cleanup(struct nand_chip *chip)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);

	if (!hynix)
		return;

	kfree(hynix->read_retry);
	kfree(hynix);
	nand_set_manufacturer_data(chip, NULL);
}

static int hynix_nand_init(struct nand_chip *chip)
{
	struct hynix_nand *hynix;
	int ret;

	if (!nand_is_slc(chip))
		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
	else
		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;

	hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
	if (!hynix)
		return -ENOMEM;

	nand_set_manufacturer_data(chip, hynix);

	ret = hynix_nand_rr_init(chip);
	if (ret)
		hynix_nand_cleanup(chip);

	return ret;
}

const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
	.detect = hynix_nand_decode_id,
	.init = hynix_nand_init,
	.cleanup = hynix_nand_cleanup,
};