tegra_nand.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
 * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
 * Copyright (C) 2012 Avionic Design GmbH
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#define COMMAND					0x00
#define   COMMAND_GO				BIT(31)
#define   COMMAND_CLE				BIT(30)
#define   COMMAND_ALE				BIT(29)
#define   COMMAND_PIO				BIT(28)
#define   COMMAND_TX				BIT(27)
#define   COMMAND_RX				BIT(26)
#define   COMMAND_SEC_CMD			BIT(25)
#define   COMMAND_AFT_DAT			BIT(24)
#define   COMMAND_TRANS_SIZE(size)		((((size) - 1) & 0xf) << 20)
#define   COMMAND_A_VALID			BIT(19)
#define   COMMAND_B_VALID			BIT(18)
#define   COMMAND_RD_STATUS_CHK			BIT(17)
#define   COMMAND_RBSY_CHK			BIT(16)
#define   COMMAND_CE(x)				BIT(8 + ((x) & 0x7))
#define   COMMAND_CLE_SIZE(size)		((((size) - 1) & 0x3) << 4)
#define   COMMAND_ALE_SIZE(size)		((((size) - 1) & 0xf) << 0)

#define STATUS					0x04

#define ISR					0x08
#define   ISR_CORRFAIL_ERR			BIT(24)
#define   ISR_UND				BIT(7)
#define   ISR_OVR				BIT(6)
#define   ISR_CMD_DONE				BIT(5)
#define   ISR_ECC_ERR				BIT(4)

#define IER					0x0c
#define   IER_ERR_TRIG_VAL(x)			(((x) & 0xf) << 16)
#define   IER_UND				BIT(7)
#define   IER_OVR				BIT(6)
#define   IER_CMD_DONE				BIT(5)
#define   IER_ECC_ERR				BIT(4)
#define   IER_GIE				BIT(0)

#define CONFIG					0x10
#define   CONFIG_HW_ECC				BIT(31)
#define   CONFIG_ECC_SEL			BIT(30)
#define   CONFIG_ERR_COR			BIT(29)
#define   CONFIG_PIPE_EN			BIT(28)
#define   CONFIG_TVAL_4				(0 << 24)
#define   CONFIG_TVAL_6				(1 << 24)
#define   CONFIG_TVAL_8				(2 << 24)
#define   CONFIG_SKIP_SPARE			BIT(23)
#define   CONFIG_BUS_WIDTH_16			BIT(21)
#define   CONFIG_COM_BSY			BIT(20)
#define   CONFIG_PS_256				(0 << 16)
#define   CONFIG_PS_512				(1 << 16)
#define   CONFIG_PS_1024			(2 << 16)
#define   CONFIG_PS_2048			(3 << 16)
#define   CONFIG_PS_4096			(4 << 16)
#define   CONFIG_SKIP_SPARE_SIZE_4		(0 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_8		(1 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_12		(2 << 14)
#define   CONFIG_SKIP_SPARE_SIZE_16		(3 << 14)
#define   CONFIG_TAG_BYTE_SIZE(x)		((x) & 0xff)

#define TIMING_1				0x14
#define   TIMING_TRP_RESP(x)			(((x) & 0xf) << 28)
#define   TIMING_TWB(x)				(((x) & 0xf) << 24)
#define   TIMING_TCR_TAR_TRR(x)			(((x) & 0xf) << 20)
#define   TIMING_TWHR(x)			(((x) & 0xf) << 16)
#define   TIMING_TCS(x)				(((x) & 0x3) << 14)
#define   TIMING_TWH(x)				(((x) & 0x3) << 12)
#define   TIMING_TWP(x)				(((x) & 0xf) << 8)
#define   TIMING_TRH(x)				(((x) & 0x3) << 4)
#define   TIMING_TRP(x)				(((x) & 0xf) << 0)

#define RESP					0x18

#define TIMING_2				0x1c
#define   TIMING_TADL(x)			((x) & 0xf)

#define CMD_REG1				0x20
#define CMD_REG2				0x24
#define ADDR_REG1				0x28
#define ADDR_REG2				0x2c

#define DMA_MST_CTRL				0x30
#define   DMA_MST_CTRL_GO			BIT(31)
#define   DMA_MST_CTRL_IN			(0 << 30)
#define   DMA_MST_CTRL_OUT			BIT(30)
#define   DMA_MST_CTRL_PERF_EN			BIT(29)
#define   DMA_MST_CTRL_IE_DONE			BIT(28)
#define   DMA_MST_CTRL_REUSE			BIT(27)
#define   DMA_MST_CTRL_BURST_1			(2 << 24)
#define   DMA_MST_CTRL_BURST_4			(3 << 24)
#define   DMA_MST_CTRL_BURST_8			(4 << 24)
#define   DMA_MST_CTRL_BURST_16			(5 << 24)
#define   DMA_MST_CTRL_IS_DONE			BIT(20)
#define   DMA_MST_CTRL_EN_A			BIT(2)
#define   DMA_MST_CTRL_EN_B			BIT(1)

#define DMA_CFG_A				0x34
#define DMA_CFG_B				0x38

#define FIFO_CTRL				0x3c
#define   FIFO_CTRL_CLR_ALL			BIT(3)

#define DATA_PTR				0x40
#define TAG_PTR					0x44
#define ECC_PTR					0x48

#define DEC_STATUS				0x4c
#define   DEC_STATUS_A_ECC_FAIL			BIT(1)
#define   DEC_STATUS_ERR_COUNT_MASK		0x00ff0000
#define   DEC_STATUS_ERR_COUNT_SHIFT		16

#define HWSTATUS_CMD				0x50
#define HWSTATUS_MASK				0x54
#define   HWSTATUS_RDSTATUS_MASK(x)		(((x) & 0xff) << 24)
#define   HWSTATUS_RDSTATUS_VALUE(x)		(((x) & 0xff) << 16)
#define   HWSTATUS_RBSY_MASK(x)			(((x) & 0xff) << 8)
#define   HWSTATUS_RBSY_VALUE(x)		(((x) & 0xff) << 0)

#define BCH_CONFIG				0xcc
#define   BCH_ENABLE				BIT(0)
#define   BCH_TVAL_4				(0 << 4)
#define   BCH_TVAL_8				(1 << 4)
#define   BCH_TVAL_14				(2 << 4)
#define   BCH_TVAL_16				(3 << 4)

#define DEC_STAT_RESULT				0xd0
#define DEC_STAT_BUF				0xd4
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_MASK	0xff000000
#define   DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT	24
#define   DEC_STAT_BUF_CORR_SEC_FLAG_MASK	0x00ff0000
#define   DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT	16
#define   DEC_STAT_BUF_MAX_CORR_CNT_MASK	0x00001f00
#define   DEC_STAT_BUF_MAX_CORR_CNT_SHIFT	8

#define OFFSET(val, off)	((val) < (off) ? 0 : (val) - (off))

#define SKIP_SPARE_BYTES	4
#define BITS_PER_STEP_RS	18
#define BITS_PER_STEP_BCH	13

#define INT_MASK		(IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
#define HWSTATUS_CMD_DEFAULT	NAND_STATUS_READY
#define HWSTATUS_MASK_DEFAULT	(HWSTATUS_RDSTATUS_MASK(1) | \
				HWSTATUS_RDSTATUS_VALUE(0) | \
				HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
				HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
struct tegra_nand_controller {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct completion command_complete;
	struct completion dma_complete;
	bool last_read_error;
	int cur_cs;
	struct nand_chip *chip;
};

struct tegra_nand_chip {
	struct nand_chip chip;
	struct gpio_desc *wp_gpio;
	struct mtd_oob_region ecc;
	u32 config;
	u32 config_ecc;
	u32 bch_config;
	int cs[1];
};

static inline struct tegra_nand_controller *
			to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
	return container_of(hw_ctrl, struct tegra_nand_controller, controller);
}

static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
	return container_of(chip, struct tegra_nand_chip, chip);
}
static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	return -ERANGE;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
	.ecc = tegra_nand_ooblayout_rs_ecc,
	.free = tegra_nand_ooblayout_no_free,
};

static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
					  BITS_PER_BYTE);

	if (section > 0)
		return -ERANGE;

	oobregion->offset = SKIP_SPARE_BYTES;
	oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);

	return 0;
}

static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
	.ecc = tegra_nand_ooblayout_bch_ecc,
	.free = tegra_nand_ooblayout_no_free,
};
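
/*
 * Interrupt handler: latches the ECC decode status for the read path
 * (last_read_error), completes the command and/or DMA completions that
 * tegra_nand_cmd()/tegra_nand_page_xfer() wait on, and acknowledges
 * every interrupt source it has seen.
 */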
static irqreturn_t tegra_nand_irq(int irq, void *data)
{
	struct tegra_nand_controller *ctrl = data;
	u32 isr, dma;

	isr = readl_relaxed(ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	dev_dbg(ctrl->dev, "isr %08x\n", isr);

	if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
		return IRQ_NONE;

	/*
	 * The bit name is somewhat misleading: this is also set when
	 * HW ECC was successful. The data sheet states:
	 * Correctable OR Un-correctable errors occurred in the DMA transfer...
	 */
	if (isr & ISR_CORRFAIL_ERR)
		ctrl->last_read_error = true;

	if (isr & ISR_CMD_DONE)
		complete(&ctrl->command_complete);

	if (isr & ISR_UND)
		dev_err(ctrl->dev, "FIFO underrun\n");

	if (isr & ISR_OVR)
		dev_err(ctrl->dev, "FIFO overrun\n");

	/* handle DMA interrupts */
	if (dma & DMA_MST_CTRL_IS_DONE) {
		writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
		complete(&ctrl->dma_complete);
	}

	/* clear interrupts */
	writel_relaxed(isr, ctrl->regs + ISR);

	return IRQ_HANDLED;
}
static const char * const tegra_nand_reg_names[] = {
	"COMMAND",
	"STATUS",
	"ISR",
	"IER",
	"CONFIG",
	"TIMING",
	NULL,
	"TIMING2",
	"CMD_REG1",
	"CMD_REG2",
	"ADDR_REG1",
	"ADDR_REG2",
	"DMA_MST_CTRL",
	"DMA_CFG_A",
	"DMA_CFG_B",
	"FIFO_CTRL",
};

static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
{
	u32 reg;
	int i;

	dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
	for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
		const char *reg_name = tegra_nand_reg_names[i];

		if (!reg_name)
			continue;

		reg = readl_relaxed(ctrl->regs + (i * 4));
		dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
	}
}
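
/*
 * Stop a command/DMA sequence that timed out: cancel the pending
 * operation, acknowledge any stale interrupts and rearm the two
 * completions so the next operation starts from a clean state. The
 * IRQ is masked while doing so to avoid racing with the handler.
 */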
static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
	u32 isr, dma;

	disable_irq(ctrl->irq);

	/* Abort current command/DMA operation */
	writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(0, ctrl->regs + COMMAND);

	/* clear interrupts */
	isr = readl_relaxed(ctrl->regs + ISR);
	writel_relaxed(isr, ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);

	reinit_completion(&ctrl->command_complete);
	reinit_completion(&ctrl->dma_complete);

	enable_irq(ctrl->irq);
}
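
/*
 * Execute one parsed sub-operation in PIO mode: translate the
 * CMD/ADDR/DATA/WAITRDY instructions into a single COMMAND register
 * write (up to two command cycles, eight address cycles and at most
 * four data bytes through the RESP register), then wait for the
 * command-done interrupt.
 */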
static int tegra_nand_cmd(struct nand_chip *chip,
			  const struct nand_subop *subop)
{
	const struct nand_op_instr *instr;
	const struct nand_op_instr *instr_data_in = NULL;
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	unsigned int op_id, size = 0, offset = 0;
	bool first_cmd = true;
	u32 reg, cmd = 0;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int naddrs, i;
		const u8 *addrs;
		u32 addr1 = 0, addr2 = 0;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd) {
				cmd |= COMMAND_CLE;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG1);
			} else {
				cmd |= COMMAND_SEC_CMD;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG2);
			}
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr1 |= *addrs++ << (BITS_PER_BYTE * i);
			naddrs -= i;
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr2 |= *addrs++ << (BITS_PER_BYTE * i);

			writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
			writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
			break;

		case NAND_OP_DATA_IN_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
				COMMAND_RX | COMMAND_A_VALID;

			instr_data_in = instr;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
				COMMAND_TX | COMMAND_A_VALID;
			memcpy(&reg, instr->ctx.data.buf.out + offset, size);
			writel_relaxed(reg, ctrl->regs + RESP);
			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= COMMAND_RBSY_CHK;
			break;
		}
	}

	cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
	writel_relaxed(cmd, ctrl->regs + COMMAND);
	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		return -ETIMEDOUT;
	}

	if (instr_data_in) {
		reg = readl_relaxed(ctrl->regs + RESP);
		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
	}

	return 0;
}
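
/*
 * Patterns accepted by the PIO path: a command/address/wait sequence
 * with no data, a plain data-out of up to four bytes, and a full
 * command/address/wait/data-in sequence (again at most four bytes,
 * the capacity of the 32-bit RESP register).
 */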
static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
	);

static int tegra_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
				      check_only);
}

static void tegra_nand_select_chip(struct mtd_info *mtd, int die_nr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);

	WARN_ON(die_nr >= (int)ARRAY_SIZE(nand->cs));

	if (die_nr < 0 || die_nr > 0) {
		ctrl->cur_cs = -1;
		return;
	}

	ctrl->cur_cs = nand->cs[die_nr];
}
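
/*
 * Switch HW ECC on or off for the following transfer by programming
 * CONFIG (and, for BCH, BCH_CONFIG) with the values computed in
 * tegra_nand_attach_chip().
 */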
static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
			      struct nand_chip *chip, bool enable)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);

	if (chip->ecc.algo == NAND_ECC_BCH && enable)
		writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
	else
		writel_relaxed(0, ctrl->regs + BCH_CONFIG);

	if (enable)
		writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
	else
		writel_relaxed(nand->config, ctrl->regs + CONFIG);
}
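
/*
 * DMA a full page and/or its OOB area to or from the chip. Data goes
 * through the A buffer (DATA_PTR), OOB through the B buffer (TAG_PTR).
 * Passing buf == NULL starts the transfer at the OOB column so the
 * OOB area can be accessed on its own.
 */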
static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
				void *buf, void *oob_buf, int oob_len,
				int page, bool read)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t dma_addr = 0, dma_addr_oob = 0;
	u32 addr1, cmd, dma_ctrl;
	int ret;

	if (read) {
		writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
	} else {
		writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
	}
	cmd = COMMAND_CLE | COMMAND_SEC_CMD;

	/* The lower 16 bits are the column address, 0 by default */
	addr1 = page << 16;
	if (!buf)
		addr1 |= mtd->writesize;
	writel_relaxed(addr1, ctrl->regs + ADDR_REG1);

	if (chip->options & NAND_ROW_ADDR_3) {
		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
	} else {
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
	}

	if (buf) {
		dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			return -EINVAL;
		}

		writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
		writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
	}

	if (oob_buf) {
		dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
					      dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			ret = -EINVAL;
			goto err_unmap_dma_page;
		}

		writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
		writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
	}

	dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
		   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
		   DMA_MST_CTRL_BURST_16;

	if (buf)
		dma_ctrl |= DMA_MST_CTRL_EN_A;
	if (oob_buf)
		dma_ctrl |= DMA_MST_CTRL_EN_B;

	if (read)
		dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
	else
		dma_ctrl |= DMA_MST_CTRL_OUT;

	writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);

	cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
	       COMMAND_CE(ctrl->cur_cs);

	if (buf)
		cmd |= COMMAND_A_VALID;
	if (oob_buf)
		cmd |= COMMAND_B_VALID;

	if (read)
		cmd |= COMMAND_RX;
	else
		cmd |= COMMAND_TX | COMMAND_AFT_DAT;

	writel_relaxed(cmd, ctrl->regs + COMMAND);

	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}

	ret = wait_for_completion_timeout(&ctrl->dma_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "DMA timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}
	ret = 0;

err_unmap_dma:
	if (oob_buf)
		dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
	if (buf)
		dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);

	return ret;
}
static int tegra_nand_read_page_raw(struct mtd_info *mtd,
				    struct nand_chip *chip, u8 *buf,
				    int oob_required, int page)
{
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const u8 *buf,
				     int oob_required, int page)
{
	void *oob_buf = oob_required ? chip->oob_poi : NULL;

	return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				    mtd->oobsize, page, false);
}

static int tegra_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, true);
}

static int tegra_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, false);
}
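
/*
 * Read a page with HW ECC enabled. On CORRFAIL_ERR the DEC_STAT_BUF
 * register tells which sectors failed and the maximum number of
 * corrected bits; sectors that failed correction are re-checked
 * against the erased-page pattern before being counted as failures.
 */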
static int tegra_nand_read_page_hwecc(struct mtd_info *mtd,
				      struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	u32 dec_stat, max_corr_cnt;
	unsigned long fail_sec_flag;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
	tegra_nand_hw_ecc(ctrl, chip, false);
	if (ret)
		return ret;

	/* No correctable or un-correctable errors, page must have 0 bitflips */
	if (!ctrl->last_read_error)
		return 0;

	/*
	 * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
	 * which contains information for all ECC selections.
	 *
	 * Note that since we do not use Command Queues DEC_RESULT does not
	 * state the number of pages we can read from the DEC_STAT_BUF. But
	 * since CORRFAIL_ERR did occur during page read we do have a valid
	 * result in DEC_STAT_BUF.
	 */
	ctrl->last_read_error = false;
	dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);

	fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
			DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;

	max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
		       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;

	if (fail_sec_flag) {
		int bit, max_bitflips = 0;

		/*
		 * Since we do not support subpage writes, a complete page
		 * is either written or not. We can take a shortcut here by
		 * checking whether any of the sectors has been read
		 * successfully: if at least one sector was read without an
		 * ECC failure, the page must have been written previously;
		 * it cannot be an erased page.
		 *
		 * E.g. the controller might return fail_sec_flag with 0x4,
		 * which would mean only the third sector failed to correct.
		 * The page must have been written and the third sector is
		 * really not correctable anymore.
		 */
		if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
			mtd->ecc_stats.failed += hweight8(fail_sec_flag);
			return max_corr_cnt;
		}

		/*
		 * All sectors failed to correct, but the ECC isn't smart
		 * enough to figure out if a page is really just erased.
		 * Read OOB data and check whether data/OOB is completely
		 * erased or if error correction just failed for all
		 * subpages.
		 */
		ret = tegra_nand_read_oob(mtd, chip, page);
		if (ret < 0)
			return ret;

		for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
			u8 *data = buf + (chip->ecc.size * bit);
			u8 *oob = chip->oob_poi + nand->ecc.offset +
				  (chip->ecc.bytes * bit);

			ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
							  oob, chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);
			if (ret < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += ret;
				max_bitflips = max(ret, max_bitflips);
			}
		}

		return max_t(unsigned int, max_corr_cnt, max_bitflips);
	} else {
		int corr_sec_flag;

		corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
				DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;

		/*
		 * The value returned in the register is the maximum of
		 * bitflips encountered in any of the ECC regions. As there is
		 * no way to get the number of bitflips in a specific region
		 * we are not able to deliver correct stats, but instead
		 * overestimate the number of corrected bitflips by assuming
		 * that all regions where errors have been corrected
		 * encountered the maximum number of bitflips.
		 */
		mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);

		return max_corr_cnt;
	}
}
static int tegra_nand_write_page_hwecc(struct mtd_info *mtd,
				       struct nand_chip *chip, const u8 *buf,
				       int oob_required, int page)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
				   0, page, false);
	tegra_nand_hw_ecc(ctrl, chip, false);

	return ret;
}
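
/*
 * Program TIMING_1/TIMING_2 from the SDR timings. Values are converted
 * from picoseconds to controller clock cycles; OFFSET() subtracts a
 * per-field constant, presumably the number of cycles the hardware
 * adds to each field on its own.
 */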
static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
				    const struct nand_sdr_timings *timings)
{
	/*
	 * The period (and all other timings in this function) is in ps,
	 * so we need to take care here to avoid integer overflows.
	 */
	unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
	unsigned int period = DIV_ROUND_UP(1000000, rate);
	u32 val, reg = 0;

	val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
				timings->tRC_min), period);
	reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));

	val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
			       max(timings->tALS_min, timings->tALH_min)),
			   period);
	reg |= TIMING_TCS(OFFSET(val, 2));

	val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
			   period);
	reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));

	reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
	reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
	reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
	reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
	reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));

	writel_relaxed(reg, ctrl->regs + TIMING_1);

	val = DIV_ROUND_UP(timings->tADL_min, period);
	reg = TIMING_TADL(OFFSET(val, 3));

	writel_relaxed(reg, ctrl->regs + TIMING_2);
}

static int tegra_nand_setup_data_interface(struct mtd_info *mtd, int csline,
					   const struct nand_data_interface *conf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	const struct nand_sdr_timings *timings;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	tegra_nand_setup_timing(ctrl, timings);

	return 0;
}

static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };
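
/*
 * Pick an ECC strength from the given list: the weakest one that both
 * satisfies the chip's minimum requirement and fits into the OOB area,
 * or, with NAND_ECC_MAXIMIZE, the strongest one that still fits.
 */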
static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
				   int strength_len, int bits_per_step,
				   int oobsize)
{
	bool maximize = chip->ecc.options & NAND_ECC_MAXIMIZE;
	int i;

	/*
	 * Loop through available strengths. Backwards in case we try to
	 * maximize the BCH strength.
	 */
	for (i = 0; i < strength_len; i++) {
		int strength_sel, bytes_per_step, bytes_per_page;

		if (maximize) {
			strength_sel = strength[strength_len - i - 1];
		} else {
			strength_sel = strength[i];

			if (strength_sel < chip->ecc_strength_ds)
				continue;
		}

		bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
					      BITS_PER_BYTE);
		bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);

		/* Check whether strength fits OOB */
		if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
			return strength_sel;
	}

	return -EINVAL;
}

static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
{
	const int *strength;
	int strength_len, bits_per_step;

	switch (chip->ecc.algo) {
	case NAND_ECC_RS:
		bits_per_step = BITS_PER_STEP_RS;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = rs_strength_bootable;
			strength_len = ARRAY_SIZE(rs_strength_bootable);
		} else {
			strength = rs_strength;
			strength_len = ARRAY_SIZE(rs_strength);
		}
		break;
	case NAND_ECC_BCH:
		bits_per_step = BITS_PER_STEP_BCH;
		if (chip->options & NAND_IS_BOOT_MEDIUM) {
			strength = bch_strength_bootable;
			strength_len = ARRAY_SIZE(bch_strength_bootable);
		} else {
			strength = bch_strength;
			strength_len = ARRAY_SIZE(bch_strength);
		}
		break;
	default:
		return -EINVAL;
	}

	return tegra_nand_get_strength(chip, strength, strength_len,
				       bits_per_step, oobsize);
}
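
/*
 * Called once the NAND geometry is known: validate the step and page
 * sizes, choose the ECC algorithm and strength if none was given,
 * wire up the page accessors and precompute the CONFIG/BCH_CONFIG
 * values used for raw and ECC transfers.
 */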
static int tegra_nand_attach_chip(struct nand_chip *chip)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bits_per_step;
	int ret;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	if (chip->ecc_step_ds != 512) {
		dev_err(ctrl->dev, "Unsupported step size %d\n",
			chip->ecc_step_ds);
		return -EINVAL;
	}

	chip->ecc.read_page = tegra_nand_read_page_hwecc;
	chip->ecc.write_page = tegra_nand_write_page_hwecc;
	chip->ecc.read_page_raw = tegra_nand_read_page_raw;
	chip->ecc.write_page_raw = tegra_nand_write_page_raw;
	chip->ecc.read_oob = tegra_nand_read_oob;
	chip->ecc.write_oob = tegra_nand_write_oob;

	if (chip->options & NAND_BUSWIDTH_16)
		nand->config |= CONFIG_BUS_WIDTH_16;

	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
		if (mtd->writesize < 2048)
			chip->ecc.algo = NAND_ECC_RS;
		else
			chip->ecc.algo = NAND_ECC_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_BCH && mtd->writesize < 2048) {
		dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
		return -EINVAL;
	}

	if (!chip->ecc.strength) {
		ret = tegra_nand_select_strength(chip, mtd->oobsize);
		if (ret < 0) {
			dev_err(ctrl->dev,
				"No valid strength found, minimum %d\n",
				chip->ecc_strength_ds);
			return ret;
		}

		chip->ecc.strength = ret;
	}

	nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
			   CONFIG_SKIP_SPARE_SIZE_4;

	switch (chip->ecc.algo) {
	case NAND_ECC_RS:
		bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
		nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
				    CONFIG_ERR_COR;
		switch (chip->ecc.strength) {
		case 4:
			nand->config_ecc |= CONFIG_TVAL_4;
			break;
		case 6:
			nand->config_ecc |= CONFIG_TVAL_6;
			break;
		case 8:
			nand->config_ecc |= CONFIG_TVAL_8;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	case NAND_ECC_BCH:
		bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
		mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
		nand->bch_config = BCH_ENABLE;
		switch (chip->ecc.strength) {
		case 4:
			nand->bch_config |= BCH_TVAL_4;
			break;
		case 8:
			nand->bch_config |= BCH_TVAL_8;
			break;
		case 14:
			nand->bch_config |= BCH_TVAL_14;
			break;
		case 16:
			nand->bch_config |= BCH_TVAL_16;
			break;
		default:
			dev_err(ctrl->dev, "ECC strength %d not supported\n",
				chip->ecc.strength);
			return -EINVAL;
		}
		break;
	default:
		dev_err(ctrl->dev, "ECC algorithm not supported\n");
		return -EINVAL;
	}

	dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
		 chip->ecc.algo == NAND_ECC_BCH ? "BCH" : "RS",
		 chip->ecc.strength);

	chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);

	switch (mtd->writesize) {
	case 256:
		nand->config |= CONFIG_PS_256;
		break;
	case 512:
		nand->config |= CONFIG_PS_512;
		break;
	case 1024:
		nand->config |= CONFIG_PS_1024;
		break;
	case 2048:
		nand->config |= CONFIG_PS_2048;
		break;
	case 4096:
		nand->config |= CONFIG_PS_4096;
		break;
	default:
		dev_err(ctrl->dev, "Unsupported writesize %d\n",
			mtd->writesize);
		return -ENODEV;
	}

	/* Store complete configuration for HW ECC in config_ecc */
	nand->config_ecc |= nand->config;

	/* Non-HW ECC read/writes complete OOB */
	nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
	writel_relaxed(nand->config, ctrl->regs + CONFIG);

	return 0;
}

static const struct nand_controller_ops tegra_nand_controller_ops = {
	.attach_chip = &tegra_nand_attach_chip,
};
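
/*
 * Parse the single NAND chip sub-node from the device tree (only one
 * chip on one chip select is supported), request the optional WP GPIO,
 * run nand_scan() and register the resulting MTD device.
 */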
static int tegra_nand_chips_init(struct device *dev,
				 struct tegra_nand_controller *ctrl)
{
	struct device_node *np = dev->of_node;
	struct device_node *np_nand;
	int nsels, nchips = of_get_child_count(np);
	struct tegra_nand_chip *nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u32 cs;

	if (nchips != 1) {
		dev_err(dev, "Currently only one NAND chip supported\n");
		return -EINVAL;
	}

	np_nand = of_get_next_child(np, NULL);

	nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
	if (nsels != 1) {
		dev_err(dev, "Missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Retrieve CS id, currently only single die NAND supported */
	ret = of_property_read_u32(np_nand, "reg", &cs);
	if (ret) {
		dev_err(dev, "could not retrieve reg property: %d\n", ret);
		return ret;
	}

	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	nand->cs[0] = cs;

	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
	if (IS_ERR(nand->wp_gpio)) {
		ret = PTR_ERR(nand->wp_gpio);
		dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
		return ret;
	}

	chip = &nand->chip;
	chip->controller = &ctrl->controller;

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = dev;
	mtd->owner = THIS_MODULE;

	nand_set_flash_node(chip, np_nand);

	if (!mtd->name)
		mtd->name = "tegra_nand";

	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
	chip->exec_op = tegra_nand_exec_op;
	chip->select_chip = tegra_nand_select_chip;
	chip->setup_data_interface = tegra_nand_setup_data_interface;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	mtd_ooblayout_ecc(mtd, 0, &nand->ecc);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "Failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	ctrl->chip = chip;

	return 0;
}
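
/*
 * Probe: map the registers, take the controller out of reset with its
 * clock running, program the default HW status/interrupt setup and
 * register the chip described in the device tree.
 */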
static int tegra_nand_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	struct tegra_nand_controller *ctrl;
	struct resource *res;
	int err = 0;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->dev = &pdev->dev;
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &tegra_nand_controller_ops;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ctrl->regs))
		return PTR_ERR(ctrl->regs);

	rst = devm_reset_control_get(&pdev->dev, "nand");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ctrl->clk = devm_clk_get(&pdev->dev, "nand");
	if (IS_ERR(ctrl->clk))
		return PTR_ERR(ctrl->clk);

	err = clk_prepare_enable(ctrl->clk);
	if (err)
		return err;

	err = reset_control_reset(rst);
	if (err) {
		dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
		goto err_disable_clk;
	}

	writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
	writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
	writel_relaxed(INT_MASK, ctrl->regs + IER);

	init_completion(&ctrl->command_complete);
	init_completion(&ctrl->dma_complete);

	ctrl->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
			       dev_name(&pdev->dev), ctrl);
	if (err) {
		dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
		goto err_disable_clk;
	}

	writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);

	err = tegra_nand_chips_init(ctrl->dev, ctrl);
	if (err)
		goto err_disable_clk;

	platform_set_drvdata(pdev, ctrl);

	return 0;

err_disable_clk:
	clk_disable_unprepare(ctrl->clk);
	return err;
}

static int tegra_nand_remove(struct platform_device *pdev)
{
	struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
	struct nand_chip *chip = ctrl->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	nand_cleanup(chip);

	clk_disable_unprepare(ctrl->clk);

	return 0;
}

static const struct of_device_id tegra_nand_of_match[] = {
	{ .compatible = "nvidia,tegra20-nand" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);

static struct platform_driver tegra_nand_driver = {
	.driver = {
		.name = "tegra-nand",
		.of_match_table = tegra_nand_of_match,
	},
	.probe = tegra_nand_probe,
	.remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);

MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
MODULE_LICENSE("GPL v2");