denali.c

/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

MODULE_LICENSE("GPL");

#define DENALI_NAND_NAME    "denali-nand"

#define DENALI_DEFAULT_OOB_SKIP_BYTES   8

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL 0x00
#define DENALI_INDEXED_DATA 0x10

#define DENALI_MAP00        (0 << 26)   /* direct access to buffer */
#define DENALI_MAP01        (1 << 26)   /* read/write pages in PIO */
#define DENALI_MAP10        (2 << 26)   /* high-level control plane */
#define DENALI_MAP11        (3 << 26)   /* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD    ((DENALI_MAP11) | 0)    /* command cycle */
#define DENALI_MAP11_ADDR   ((DENALI_MAP11) | 1)    /* address cycle */
#define DENALI_MAP11_DATA   ((DENALI_MAP11) | 2)    /* data cycle */

/* MAP10 commands */
#define DENALI_ERASE        0x01

#define DENALI_BANK(denali) ((denali)->active_bank << 24)

#define DENALI_INVALID_BANK -1
#define DENALI_NR_BANKS     4
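
/*
 * Illustration (not in the original source): a MAP11 command cycle to
 * bank 1 composes its slave address as
 *
 *   DENALI_MAP11_CMD | (1 << 24) = (3 << 26) | (1 << 24) | 0 = 0x0d000000
 *
 * i.e. bits [27:26] select the access map, bits [25:24] select the bank,
 * and the low bits give the MAP11 cycle type.
 */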

static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
    return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * Direct Addressing - the slave address forms the control information
 * (command type, bank, block, and page address). The slave data is the
 * actual data to be transferred. This mode requires a 28-bit address
 * region to be allocated.
 */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
    return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
                u32 data)
{
    iowrite32(data, denali->host + addr);
}

/*
 * Indexed Addressing - an address translation module intervenes in passing
 * the control information. This mode reduces the required address range;
 * the control information and the transferred data are latched by registers
 * in the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
    iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
    return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
                 u32 data)
{
    iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
    iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
    uint32_t features = ioread32(denali->reg + FEATURES);

    denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

    /* the encoding changed from rev 5.0 to 5.1 */
    if (denali->revision < 0x0501)
        denali->max_banks <<= 1;
}
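
/*
 * Worked example (illustrative): if FIELD_GET(FEATURES__N_BANKS, features)
 * returns 2, a rev 5.1+ controller supports 1 << 2 = 4 banks; on pre-5.1
 * revisions the same field value means twice that, so the result is
 * doubled to 8.
 */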

static void denali_enable_irq(struct denali_nand_info *denali)
{
    int i;

    for (i = 0; i < DENALI_NR_BANKS; i++)
        iowrite32(U32_MAX, denali->reg + INTR_EN(i));
    iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
    int i;

    for (i = 0; i < DENALI_NR_BANKS; i++)
        iowrite32(0, denali->reg + INTR_EN(i));
    iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
                 int bank, uint32_t irq_status)
{
    /* write one to clear bits */
    iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
    int i;

    for (i = 0; i < DENALI_NR_BANKS; i++)
        denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
    struct denali_nand_info *denali = dev_id;
    irqreturn_t ret = IRQ_NONE;
    uint32_t irq_status;
    int i;

    spin_lock(&denali->irq_lock);

    for (i = 0; i < DENALI_NR_BANKS; i++) {
        irq_status = ioread32(denali->reg + INTR_STATUS(i));
        if (irq_status)
            ret = IRQ_HANDLED;

        denali_clear_irq(denali, i, irq_status);

        if (i != denali->active_bank)
            continue;

        denali->irq_status |= irq_status;

        if (denali->irq_status & denali->irq_mask)
            complete(&denali->complete);
    }

    spin_unlock(&denali->irq_lock);

    return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
    unsigned long flags;

    spin_lock_irqsave(&denali->irq_lock, flags);
    denali->irq_status = 0;
    denali->irq_mask = 0;
    spin_unlock_irqrestore(&denali->irq_lock, flags);
}

static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
                    uint32_t irq_mask)
{
    unsigned long time_left, flags;
    uint32_t irq_status;

    spin_lock_irqsave(&denali->irq_lock, flags);

    irq_status = denali->irq_status;

    if (irq_mask & irq_status) {
        /* return immediately if the IRQ has already happened. */
        spin_unlock_irqrestore(&denali->irq_lock, flags);
        return irq_status;
    }

    denali->irq_mask = irq_mask;
    reinit_completion(&denali->complete);
    spin_unlock_irqrestore(&denali->irq_lock, flags);

    time_left = wait_for_completion_timeout(&denali->complete,
                        msecs_to_jiffies(1000));
    if (!time_left) {
        dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
            irq_mask);
        return 0;
    }

    return denali->irq_status;
}
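
/*
 * The helpers above implement a simple kick-and-wait protocol. A typical
 * caller (sketch, mirroring denali_erase() further below):
 *
 *    denali_reset_irq(denali);            // drop stale status first
 *    denali->host_write(denali, ...);     // kick the operation
 *    irq_status = denali_wait_for_irq(denali,
 *                     INTR__ERASE_COMP | INTR__ERASE_FAIL);
 *
 * denali_reset_irq() must come first because denali_isr() accumulates
 * status bits only for the active bank and completes the waiter as soon
 * as any bit in irq_mask is observed.
 */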

static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
    unsigned long flags;
    uint32_t irq_status;

    spin_lock_irqsave(&denali->irq_lock, flags);
    irq_status = denali->irq_status;
    spin_unlock_irqrestore(&denali->irq_lock, flags);

    return irq_status;
}

static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
    int i;

    for (i = 0; i < len; i++)
        buf[i] = denali->host_read(denali, addr);
}

static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
    int i;

    for (i = 0; i < len; i++)
        denali->host_write(denali, addr, buf[i]);
}

static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
    uint16_t *buf16 = (uint16_t *)buf;
    int i;

    for (i = 0; i < len / 2; i++)
        buf16[i] = denali->host_read(denali, addr);
}

static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
                   int len)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
    const uint16_t *buf16 = (const uint16_t *)buf;
    int i;

    for (i = 0; i < len / 2; i++)
        denali->host_write(denali, addr, buf16[i]);
}

static uint8_t denali_read_byte(struct mtd_info *mtd)
{
    uint8_t byte;

    denali_read_buf(mtd, &byte, 1);

    return byte;
}

static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
    denali_write_buf(mtd, &byte, 1);
}

static uint16_t denali_read_word(struct mtd_info *mtd)
{
    uint16_t word;

    denali_read_buf16(mtd, (uint8_t *)&word, 2);

    return word;
}

static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    uint32_t type;

    if (ctrl & NAND_CLE)
        type = DENALI_MAP11_CMD;
    else if (ctrl & NAND_ALE)
        type = DENALI_MAP11_ADDR;
    else
        return;

    /*
     * Some commands are followed by chip->dev_ready or chip->waitfunc.
     * irq_status must be cleared here to catch the R/B# interrupt later.
     */
    if (ctrl & NAND_CTRL_CHANGE)
        denali_reset_irq(denali);

    denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}

static int denali_dev_ready(struct mtd_info *mtd)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);

    return !!(denali_check_irq(denali) & INTR__INT_ACT);
}

static int denali_check_erased_page(struct mtd_info *mtd,
                    struct nand_chip *chip, uint8_t *buf,
                    unsigned long uncor_ecc_flags,
                    unsigned int max_bitflips)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
    int ecc_steps = chip->ecc.steps;
    int ecc_size = chip->ecc.size;
    int ecc_bytes = chip->ecc.bytes;
    int i, stat;

    for (i = 0; i < ecc_steps; i++) {
        if (!(uncor_ecc_flags & BIT(i)))
            continue;

        stat = nand_check_erased_ecc_chunk(buf, ecc_size,
                           ecc_code, ecc_bytes,
                           NULL, 0,
                           chip->ecc.strength);
        if (stat < 0) {
            mtd->ecc_stats.failed++;
        } else {
            mtd->ecc_stats.corrected += stat;
            max_bitflips = max_t(unsigned int, max_bitflips, stat);
        }

        buf += ecc_size;
        ecc_code += ecc_bytes;
    }

    return max_bitflips;
}

static int denali_hw_ecc_fixup(struct mtd_info *mtd,
                   struct denali_nand_info *denali,
                   unsigned long *uncor_ecc_flags)
{
    struct nand_chip *chip = mtd_to_nand(mtd);
    int bank = denali->active_bank;
    uint32_t ecc_cor;
    unsigned int max_bitflips;

    ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
    ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

    if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
        /*
         * This flag is set when an uncorrectable error occurs in at
         * least one ECC sector. We cannot know how many sectors, or
         * which sector(s), so the erased-page check must be run for
         * all of them.
         */
        *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
        return 0;
    }

    max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

    /*
     * The register holds the maximum of per-sector corrected bitflips.
     * This is suitable for the return value of the ->read_page() callback.
     * Unfortunately, we cannot know the total number of corrected bits in
     * the page. Increase the stats by max_bitflips. (compromised solution)
     */
    mtd->ecc_stats.corrected += max_bitflips;

    return max_bitflips;
}

static int denali_sw_ecc_fixup(struct mtd_info *mtd,
                   struct denali_nand_info *denali,
                   unsigned long *uncor_ecc_flags, uint8_t *buf)
{
    unsigned int ecc_size = denali->nand.ecc.size;
    unsigned int bitflips = 0;
    unsigned int max_bitflips = 0;
    uint32_t err_addr, err_cor_info;
    unsigned int err_byte, err_sector, err_device;
    uint8_t err_cor_value;
    unsigned int prev_sector = 0;
    uint32_t irq_status;

    denali_reset_irq(denali);

    do {
        err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
        err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
        err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

        err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
        err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
                      err_cor_info);
        err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
                       err_cor_info);

        /* reset the bitflip counter when crossing ECC sector */
        if (err_sector != prev_sector)
            bitflips = 0;

        if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
            /*
             * Check later if this is a real ECC error, or
             * an erased sector.
             */
            *uncor_ecc_flags |= BIT(err_sector);
        } else if (err_byte < ecc_size) {
            /*
             * If err_byte is equal to or greater than ecc_size,
             * the error happened in the OOB area, so we ignore
             * it; there is no need to correct it. err_device
             * identifies which NAND device the error bits belong
             * to when more than one NAND chip is connected.
             */
            int offset;
            unsigned int flips_in_byte;

            offset = (err_sector * ecc_size + err_byte) *
                    denali->devs_per_cs + err_device;

            /* correct the ECC error */
            flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
            buf[offset] ^= err_cor_value;
            mtd->ecc_stats.corrected += flips_in_byte;
            bitflips += flips_in_byte;

            max_bitflips = max(max_bitflips, bitflips);
        }

        prev_sector = err_sector;
    } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

    /*
     * Once all ECC errors have been handled, the controller triggers an
     * ECC_TRANSACTION_DONE interrupt.
     */
    irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
    if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
        return -EIO;

    return max_bitflips;
}
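
/*
 * Worked example for the offset calculation above (illustrative numbers):
 * with ecc_size = 512 and two x8 devices per chip select (devs_per_cs = 2),
 * an error reported at err_sector = 1, err_byte = 10, err_device = 1
 * lands at
 *
 *    offset = (1 * 512 + 10) * 2 + 1 = 1045
 *
 * because the bytes of the two devices are interleaved in the transferred
 * buffer.
 */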

static void denali_setup_dma64(struct denali_nand_info *denali,
                   dma_addr_t dma_addr, int page, int write)
{
    uint32_t mode;
    const int page_count = 1;

    mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

    /* DMA is a three step process */

    /*
     * 1. setup transfer type, interrupt when complete,
     *    burst len = 64 bytes, the number of pages
     */
    denali->host_write(denali, mode,
               0x01002000 | (64 << 16) | (write << 8) | page_count);

    /* 2. set memory low address */
    denali->host_write(denali, mode, lower_32_bits(dma_addr));

    /* 3. set memory high address */
    denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
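
/*
 * Worked example (illustrative): for a single-page write, the first MAP10
 * word written above evaluates to
 *
 *    0x01002000 | (64 << 16) | (1 << 8) | 1 = 0x01402101
 *
 * i.e. the burst length of 64 in bits [23:16], the write flag in bit 8,
 * and page_count = 1 in the low byte.
 */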

static void denali_setup_dma32(struct denali_nand_info *denali,
                   dma_addr_t dma_addr, int page, int write)
{
    uint32_t mode;
    const int page_count = 1;

    mode = DENALI_MAP10 | DENALI_BANK(denali);

    /* DMA is a four step process */

    /* 1. setup transfer type and # of pages */
    denali->host_write(denali, mode | page,
               0x2000 | (write << 8) | page_count);

    /* 2. set memory high address bits 23:8 */
    denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

    /* 3. set memory low address bits 23:8 */
    denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

    /* 4. interrupt when complete, burst len = 64 bytes */
    denali->host_write(denali, mode | 0x14000, 0x2400);
}
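
/*
 * Note (illustrative): the 32-bit descriptor carries the DMA address in
 * the MAP10 slave address itself. For dma_addr = 0x12345678, step 2
 * encodes (0x12345678 >> 16) << 8 = 0x123400 and step 3 encodes
 * (0x12345678 & 0xffff) << 8 = 0x567800, each qualified by the
 * 0x2200/0x2300 sub-command written as data.
 */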

static int denali_pio_read(struct denali_nand_info *denali, void *buf,
               size_t size, int page, int raw)
{
    u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
    uint32_t *buf32 = (uint32_t *)buf;
    uint32_t irq_status, ecc_err_mask;
    int i;

    if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
        ecc_err_mask = INTR__ECC_UNCOR_ERR;
    else
        ecc_err_mask = INTR__ECC_ERR;

    denali_reset_irq(denali);

    for (i = 0; i < size / 4; i++)
        *buf32++ = denali->host_read(denali, addr);

    irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
    if (!(irq_status & INTR__PAGE_XFER_INC))
        return -EIO;

    if (irq_status & INTR__ERASED_PAGE)
        memset(buf, 0xff, size);

    return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali,
                const void *buf, size_t size, int page, int raw)
{
    u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
    const uint32_t *buf32 = (const uint32_t *)buf;
    uint32_t irq_status;
    int i;

    denali_reset_irq(denali);

    for (i = 0; i < size / 4; i++)
        denali->host_write(denali, addr, *buf32++);

    irq_status = denali_wait_for_irq(denali,
                INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
    if (!(irq_status & INTR__PROGRAM_COMP))
        return -EIO;

    return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
               size_t size, int page, int raw, int write)
{
    if (write)
        return denali_pio_write(denali, buf, size, page, raw);
    else
        return denali_pio_read(denali, buf, size, page, raw);
}

static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
               size_t size, int page, int raw, int write)
{
    dma_addr_t dma_addr;
    uint32_t irq_mask, irq_status, ecc_err_mask;
    enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
    int ret = 0;

    dma_addr = dma_map_single(denali->dev, buf, size, dir);
    if (dma_mapping_error(denali->dev, dma_addr)) {
        dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
        return denali_pio_xfer(denali, buf, size, page, raw, write);
    }

    if (write) {
        /*
         * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
         * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
         * when the page program is completed.
         */
        irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
        ecc_err_mask = 0;
    } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
        irq_mask = INTR__DMA_CMD_COMP;
        ecc_err_mask = INTR__ECC_UNCOR_ERR;
    } else {
        irq_mask = INTR__DMA_CMD_COMP;
        ecc_err_mask = INTR__ECC_ERR;
    }

    iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);

    /*
     * The ->setup_dma() hook kicks DMA by using the data/command
     * interface, which belongs to a different AXI port from the
     * register interface. Read back the register to avoid a race.
     */
    ioread32(denali->reg + DMA_ENABLE);

    denali_reset_irq(denali);
    denali->setup_dma(denali, dma_addr, page, write);

    irq_status = denali_wait_for_irq(denali, irq_mask);
    if (!(irq_status & INTR__DMA_CMD_COMP))
        ret = -EIO;
    else if (irq_status & ecc_err_mask)
        ret = -EBADMSG;

    iowrite32(0, denali->reg + DMA_ENABLE);

    dma_unmap_single(denali->dev, dma_addr, size, dir);

    if (irq_status & INTR__ERASED_PAGE)
        memset(buf, 0xff, size);

    return ret;
}

static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
                size_t size, int page, int raw, int write)
{
    iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
    iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
          denali->reg + TRANSFER_SPARE_REG);

    if (denali->dma_avail)
        return denali_dma_xfer(denali, buf, size, page, raw, write);
    else
        return denali_pio_xfer(denali, buf, size, page, raw, write);
}

static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
                int page, int write)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    int writesize = mtd->writesize;
    int oobsize = mtd->oobsize;
    uint8_t *bufpoi = chip->oob_poi;
    int ecc_steps = chip->ecc.steps;
    int ecc_size = chip->ecc.size;
    int ecc_bytes = chip->ecc.bytes;
    int oob_skip = denali->oob_skip_bytes;
    size_t size = writesize + oobsize;
    int i, pos, len;

    /* BBM at the beginning of the OOB area */
    if (write)
        nand_prog_page_begin_op(chip, page, writesize, bufpoi,
                    oob_skip);
    else
        nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
    bufpoi += oob_skip;

    /* OOB ECC */
    for (i = 0; i < ecc_steps; i++) {
        pos = ecc_size + i * (ecc_size + ecc_bytes);
        len = ecc_bytes;

        if (pos >= writesize)
            pos += oob_skip;
        else if (pos + len > writesize)
            len = writesize - pos;

        if (write)
            nand_change_write_column_op(chip, pos, bufpoi, len,
                            false);
        else
            nand_change_read_column_op(chip, pos, bufpoi, len,
                           false);
        bufpoi += len;
        if (len < ecc_bytes) {
            len = ecc_bytes - len;
            if (write)
                nand_change_write_column_op(chip, writesize +
                                oob_skip, bufpoi,
                                len, false);
            else
                nand_change_read_column_op(chip, writesize +
                               oob_skip, bufpoi,
                               len, false);
            bufpoi += len;
        }
    }

    /* OOB free */
    len = oobsize - (bufpoi - chip->oob_poi);
    if (write)
        nand_change_write_column_op(chip, size - len, bufpoi, len,
                        false);
    else
        nand_change_read_column_op(chip, size - len, bufpoi, len,
                       false);
}
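
/*
 * Sketch of the physical page layout that denali_oob_xfer() above and the
 * raw page accessors below all navigate (sizes depend on the chip):
 *
 *    |        main area (writesize)        |  spare area (oobsize)   |
 *    | payload0 | ECC0 | payload1 | ECC1 ... (BBM) ... ECCn | OOB free |
 *
 * The controller reserves oob_skip_bytes at the start of the spare area
 * (offset writesize) so the bad block marker stays readable; an ECC chunk
 * that would cross the main/spare boundary is split around those bytes.
 * That split is what the pos/len adjustments in these functions handle.
 */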

static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
                uint8_t *buf, int oob_required, int page)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    int writesize = mtd->writesize;
    int oobsize = mtd->oobsize;
    int ecc_steps = chip->ecc.steps;
    int ecc_size = chip->ecc.size;
    int ecc_bytes = chip->ecc.bytes;
    void *tmp_buf = denali->buf;
    int oob_skip = denali->oob_skip_bytes;
    size_t size = writesize + oobsize;
    int ret, i, pos, len;

    ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
    if (ret)
        return ret;

    /* Arrange the buffer for syndrome payload/ecc layout */
    if (buf) {
        for (i = 0; i < ecc_steps; i++) {
            pos = i * (ecc_size + ecc_bytes);
            len = ecc_size;

            if (pos >= writesize)
                pos += oob_skip;
            else if (pos + len > writesize)
                len = writesize - pos;

            memcpy(buf, tmp_buf + pos, len);
            buf += len;
            if (len < ecc_size) {
                len = ecc_size - len;
                memcpy(buf, tmp_buf + writesize + oob_skip,
                       len);
                buf += len;
            }
        }
    }

    if (oob_required) {
        uint8_t *oob = chip->oob_poi;

        /* BBM at the beginning of the OOB area */
        memcpy(oob, tmp_buf + writesize, oob_skip);
        oob += oob_skip;

        /* OOB ECC */
        for (i = 0; i < ecc_steps; i++) {
            pos = ecc_size + i * (ecc_size + ecc_bytes);
            len = ecc_bytes;

            if (pos >= writesize)
                pos += oob_skip;
            else if (pos + len > writesize)
                len = writesize - pos;

            memcpy(oob, tmp_buf + pos, len);
            oob += len;
            if (len < ecc_bytes) {
                len = ecc_bytes - len;
                memcpy(oob, tmp_buf + writesize + oob_skip,
                       len);
                oob += len;
            }
        }

        /* OOB free */
        len = oobsize - (oob - chip->oob_poi);
        memcpy(oob, tmp_buf + size - len, len);
    }

    return 0;
}

static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
               int page)
{
    denali_oob_xfer(mtd, chip, page, 0);

    return 0;
}

static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
                int page)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);

    denali_reset_irq(denali);

    denali_oob_xfer(mtd, chip, page, 1);

    return nand_prog_page_end_op(chip);
}

static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                uint8_t *buf, int oob_required, int page)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    unsigned long uncor_ecc_flags = 0;
    int stat = 0;
    int ret;

    ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
    if (ret && ret != -EBADMSG)
        return ret;

    if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
        stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
    else if (ret == -EBADMSG)
        stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

    if (stat < 0)
        return stat;

    if (uncor_ecc_flags) {
        ret = denali_read_oob(mtd, chip, page);
        if (ret)
            return ret;

        stat = denali_check_erased_page(mtd, chip, buf,
                        uncor_ecc_flags, stat);
    }

    return stat;
}

static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
                 const uint8_t *buf, int oob_required, int page)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    int writesize = mtd->writesize;
    int oobsize = mtd->oobsize;
    int ecc_steps = chip->ecc.steps;
    int ecc_size = chip->ecc.size;
    int ecc_bytes = chip->ecc.bytes;
    void *tmp_buf = denali->buf;
    int oob_skip = denali->oob_skip_bytes;
    size_t size = writesize + oobsize;
    int i, pos, len;

    /*
     * Fill the buffer with 0xff first unless this is a full-page
     * transfer. This simplifies the logic.
     */
    if (!buf || !oob_required)
        memset(tmp_buf, 0xff, size);

    /* Arrange the buffer for syndrome payload/ecc layout */
    if (buf) {
        for (i = 0; i < ecc_steps; i++) {
            pos = i * (ecc_size + ecc_bytes);
            len = ecc_size;

            if (pos >= writesize)
                pos += oob_skip;
            else if (pos + len > writesize)
                len = writesize - pos;

            memcpy(tmp_buf + pos, buf, len);
            buf += len;
            if (len < ecc_size) {
                len = ecc_size - len;
                memcpy(tmp_buf + writesize + oob_skip, buf,
                       len);
                buf += len;
            }
        }
    }

    if (oob_required) {
        const uint8_t *oob = chip->oob_poi;

        /* BBM at the beginning of the OOB area */
        memcpy(tmp_buf + writesize, oob, oob_skip);
        oob += oob_skip;

        /* OOB ECC */
        for (i = 0; i < ecc_steps; i++) {
            pos = ecc_size + i * (ecc_size + ecc_bytes);
            len = ecc_bytes;

            if (pos >= writesize)
                pos += oob_skip;
            else if (pos + len > writesize)
                len = writesize - pos;

            memcpy(tmp_buf + pos, oob, len);
            oob += len;
            if (len < ecc_bytes) {
                len = ecc_bytes - len;
                memcpy(tmp_buf + writesize + oob_skip, oob,
                       len);
                oob += len;
            }
        }

        /* OOB free */
        len = oobsize - (oob - chip->oob_poi);
        memcpy(tmp_buf + size - len, oob, len);
    }

    return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}

static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                 const uint8_t *buf, int oob_required, int page)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);

    return denali_data_xfer(denali, (void *)buf, mtd->writesize,
                page, 0, 1);
}

static void denali_select_chip(struct mtd_info *mtd, int chip)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);

    denali->active_bank = chip;
}

static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    uint32_t irq_status;

    /* R/B# pin transitioned from low to high? */
    irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

    return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct mtd_info *mtd, int page)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    uint32_t irq_status;

    denali_reset_irq(denali);

    denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
               DENALI_ERASE);

    /* wait for erase to complete or failure to occur */
    irq_status = denali_wait_for_irq(denali,
                     INTR__ERASE_COMP | INTR__ERASE_FAIL);

    return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}

static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
                       const struct nand_data_interface *conf)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    const struct nand_sdr_timings *timings;
    unsigned long t_x, mult_x;
    int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
    int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
    int addr_2_data_mask;
    uint32_t tmp;

    timings = nand_get_sdr_timings(conf);
    if (IS_ERR(timings))
        return PTR_ERR(timings);

    /* clk_x period in picoseconds */
    t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
    if (!t_x)
        return -EINVAL;

    /*
     * The bus interface clock, clk_x, is phase aligned with the core
     * clock. clk_x is an integral multiple N of the core clock. The
     * value N is configured at IP delivery time; the valid values are
     * 4, 5, and 6.
     */
    mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
    if (mult_x < 4 || mult_x > 6)
        return -EINVAL;

    if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
        return 0;

    /* tREA -> ACC_CLKS */
    acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
    acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

    tmp = ioread32(denali->reg + ACC_CLKS);
    tmp &= ~ACC_CLKS__VALUE;
    tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
    iowrite32(tmp, denali->reg + ACC_CLKS);

    /* tRHW -> RE_2_WE */
    re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
    re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

    tmp = ioread32(denali->reg + RE_2_WE);
    tmp &= ~RE_2_WE__VALUE;
    tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
    iowrite32(tmp, denali->reg + RE_2_WE);

    /* tRHZ -> RE_2_RE */
    re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
    re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

    tmp = ioread32(denali->reg + RE_2_RE);
    tmp &= ~RE_2_RE__VALUE;
    tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
    iowrite32(tmp, denali->reg + RE_2_RE);

    /*
     * tCCS, tWHR -> WE_2_RE
     *
     * With WE_2_RE properly set, the Denali controller automatically takes
     * care of the delay; the driver need not set NAND_WAIT_TCCS.
     */
    we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
    we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

    tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
    tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
    tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
    iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

    /* tADL -> ADDR_2_DATA */

    /* for older versions, ADDR_2_DATA is only 6 bit wide */
    addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
    if (denali->revision < 0x0501)
        addr_2_data_mask >>= 1;

    addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
    addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

    tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
    tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
    tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
    iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

    /* tREH, tWH -> RDWR_EN_HI_CNT */
    rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
                  t_x);
    rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

    tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
    tmp &= ~RDWR_EN_HI_CNT__VALUE;
    tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
    iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

    /* tRP, tWP -> RDWR_EN_LO_CNT */
    rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
    rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
                     t_x);
    rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
    rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
    rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

    tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
    tmp &= ~RDWR_EN_LO_CNT__VALUE;
    tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
    iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

    /* tCS, tCEA -> CS_SETUP_CNT */
    cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
            (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
            0);
    cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

    tmp = ioread32(denali->reg + CS_SETUP_CNT);
    tmp &= ~CS_SETUP_CNT__VALUE;
    tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
    iowrite32(tmp, denali->reg + CS_SETUP_CNT);

    return 0;
}
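
/*
 * Worked example (illustrative rates): with clk_x at 200 MHz,
 * t_x = 10^12 / 200000000 = 5000 ps. For a chip with tREA_max = 20 ns,
 * ACC_CLKS becomes DIV_ROUND_UP(20000, 5000) = 4 clk_x cycles; the other
 * fields above are derived the same way from their respective timings.
 */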

static void denali_reset_banks(struct denali_nand_info *denali)
{
    u32 irq_status;
    int i;

    for (i = 0; i < denali->max_banks; i++) {
        denali->active_bank = i;

        denali_reset_irq(denali);

        iowrite32(DEVICE_RESET__BANK(i),
              denali->reg + DEVICE_RESET);

        irq_status = denali_wait_for_irq(denali,
            INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
        if (!(irq_status & INTR__INT_ACT))
            break;
    }

    dev_dbg(denali->dev, "%d chips connected\n", i);
    denali->max_banks = i;
}

static void denali_hw_init(struct denali_nand_info *denali)
{
    /*
     * The REVISION register may not be reliable. Platforms are allowed to
     * override it.
     */
    if (!denali->revision)
        denali->revision = swab16(ioread32(denali->reg + REVISION));

    /*
     * Set how many bytes should be skipped before writing data in the OOB
     * area. If a non-zero value has already been set (by firmware or
     * something else), just use it. Otherwise, set the driver's default.
     */
    denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
    if (!denali->oob_skip_bytes) {
        denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
        iowrite32(denali->oob_skip_bytes,
              denali->reg + SPARE_AREA_SKIP_BYTES);
    }

    denali_detect_max_banks(denali);
    iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
    iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

    iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}

int denali_calc_ecc_bytes(int step_size, int strength)
{
    /* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
    return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
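
/*
 * Example for denali_calc_ecc_bytes() above: with step_size = 512 and
 * strength = 8, fls(512 * 8) = fls(4096) = 13, so
 * ecc.bytes = DIV_ROUND_UP(8 * 13, 16) * 2 = 14.
 */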

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
                struct mtd_oob_region *oobregion)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    struct nand_chip *chip = mtd_to_nand(mtd);

    if (section)
        return -ERANGE;

    oobregion->offset = denali->oob_skip_bytes;
    oobregion->length = chip->ecc.total;

    return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
                 struct mtd_oob_region *oobregion)
{
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    struct nand_chip *chip = mtd_to_nand(mtd);

    if (section)
        return -ERANGE;

    oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
    oobregion->length = mtd->oobsize - oobregion->offset;

    return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
    .ecc = denali_ooblayout_ecc,
    .free = denali_ooblayout_free,
};

static int denali_multidev_fixup(struct denali_nand_info *denali)
{
    struct nand_chip *chip = &denali->nand;
    struct mtd_info *mtd = nand_to_mtd(chip);

    /*
     * Support for multi-device:
     * When the IP configuration is x16 capable and two x8 chips are
     * connected in parallel, DEVICES_CONNECTED should be set to 2.
     * In this case, the core framework knows nothing about this fact,
     * so we should tell it the _logical_ page size and anything else
     * necessary.
     */
    denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

    /*
     * On some SoCs, DEVICES_CONNECTED is not auto-detected and is left
     * at 0. Set it to 1 in that case.
     */
    if (denali->devs_per_cs == 0) {
        denali->devs_per_cs = 1;
        iowrite32(1, denali->reg + DEVICES_CONNECTED);
    }

    if (denali->devs_per_cs == 1)
        return 0;

    if (denali->devs_per_cs != 2) {
        dev_err(denali->dev, "unsupported number of devices %d\n",
            denali->devs_per_cs);
        return -EINVAL;
    }

    /* 2 chips in parallel */
    mtd->size <<= 1;
    mtd->erasesize <<= 1;
    mtd->writesize <<= 1;
    mtd->oobsize <<= 1;
    chip->chipsize <<= 1;
    chip->page_shift += 1;
    chip->phys_erase_shift += 1;
    chip->bbt_erase_shift += 1;
    chip->chip_shift += 1;
    chip->pagemask <<= 1;
    chip->ecc.size <<= 1;
    chip->ecc.bytes <<= 1;
    chip->ecc.strength <<= 1;
    denali->oob_skip_bytes <<= 1;

    return 0;
}
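
/*
 * Example (illustrative): two x8 chips with 2 KiB pages and 512-byte ECC
 * sectors appear to the MTD core as a single device with 4 KiB logical
 * pages and 1024-byte ECC sectors. Only ecc.steps stays the same, which
 * is why every other geometry field above is doubled.
 */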

static int denali_attach_chip(struct nand_chip *chip)
{
    struct mtd_info *mtd = nand_to_mtd(chip);
    struct denali_nand_info *denali = mtd_to_denali(mtd);
    int ret;

    if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
        denali->dma_avail = 1;

    if (denali->dma_avail) {
        int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

        ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
        if (ret) {
            dev_info(denali->dev,
                 "Failed to set DMA mask. Disabling DMA.\n");
            denali->dma_avail = 0;
        }
    }

    if (denali->dma_avail) {
        chip->options |= NAND_USE_BOUNCE_BUFFER;
        chip->buf_align = 16;
        if (denali->caps & DENALI_CAP_DMA_64BIT)
            denali->setup_dma = denali_setup_dma64;
        else
            denali->setup_dma = denali_setup_dma32;
    }

    chip->bbt_options |= NAND_BBT_USE_FLASH;
    chip->bbt_options |= NAND_BBT_NO_OOB;
    chip->ecc.mode = NAND_ECC_HW_SYNDROME;
    chip->options |= NAND_NO_SUBPAGE_WRITE;

    ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
                   mtd->oobsize - denali->oob_skip_bytes);
    if (ret) {
        dev_err(denali->dev, "Failed to setup ECC settings.\n");
        return ret;
    }

    dev_dbg(denali->dev,
        "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
        chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

    iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
          FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
          denali->reg + ECC_CORRECTION);
    iowrite32(mtd->erasesize / mtd->writesize,
          denali->reg + PAGES_PER_BLOCK);
    iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
          denali->reg + DEVICE_WIDTH);
    iowrite32(chip->options & NAND_ROW_ADDR_3 ?
          0 : TWO_ROW_ADDR_CYCLES__FLAG,
          denali->reg + TWO_ROW_ADDR_CYCLES);
    iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
    iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

    iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
    iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
    /* chip->ecc.steps is set by nand_scan_tail(); not available here */
    iowrite32(mtd->writesize / chip->ecc.size,
          denali->reg + CFG_NUM_DATA_BLOCKS);

    mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

    if (chip->options & NAND_BUSWIDTH_16) {
        chip->read_buf = denali_read_buf16;
        chip->write_buf = denali_write_buf16;
    } else {
        chip->read_buf = denali_read_buf;
        chip->write_buf = denali_write_buf;
    }
    chip->ecc.read_page = denali_read_page;
    chip->ecc.read_page_raw = denali_read_page_raw;
    chip->ecc.write_page = denali_write_page;
    chip->ecc.write_page_raw = denali_write_page_raw;
    chip->ecc.read_oob = denali_read_oob;
    chip->ecc.write_oob = denali_write_oob;
    chip->erase = denali_erase;

    ret = denali_multidev_fixup(denali);
    if (ret)
        return ret;

    /*
     * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
     * use devm_kmalloc() because the memory allocated by devm_ does not
     * guarantee DMA-safe alignment.
     */
    denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
    if (!denali->buf)
        return -ENOMEM;

    return 0;
}

static void denali_detach_chip(struct nand_chip *chip)
{
    struct mtd_info *mtd = nand_to_mtd(chip);
    struct denali_nand_info *denali = mtd_to_denali(mtd);

    kfree(denali->buf);
}

static const struct nand_controller_ops denali_controller_ops = {
    .attach_chip = denali_attach_chip,
    .detach_chip = denali_detach_chip,
};

int denali_init(struct denali_nand_info *denali)
{
    struct nand_chip *chip = &denali->nand;
    struct mtd_info *mtd = nand_to_mtd(chip);
    u32 features = ioread32(denali->reg + FEATURES);
    int ret;

    mtd->dev.parent = denali->dev;

    denali_hw_init(denali);

    init_completion(&denali->complete);
    spin_lock_init(&denali->irq_lock);

    denali_clear_irq_all(denali);

    ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
                   IRQF_SHARED, DENALI_NAND_NAME, denali);
    if (ret) {
        dev_err(denali->dev, "Unable to request IRQ\n");
        return ret;
    }

    denali_enable_irq(denali);
    denali_reset_banks(denali);
    if (!denali->max_banks) {
        /* Error out early if no chip is found, for whatever reason. */
        ret = -ENODEV;
        goto disable_irq;
    }

    denali->active_bank = DENALI_INVALID_BANK;

    nand_set_flash_node(chip, denali->dev->of_node);
    /* Fall back to the default name if DT did not provide a "label" property */
    if (!mtd->name)
        mtd->name = "denali-nand";

    chip->select_chip = denali_select_chip;
    chip->read_byte = denali_read_byte;
    chip->write_byte = denali_write_byte;
    chip->read_word = denali_read_word;
    chip->cmd_ctrl = denali_cmd_ctrl;
    chip->dev_ready = denali_dev_ready;
    chip->waitfunc = denali_waitfunc;

    if (features & FEATURES__INDEX_ADDR) {
        denali->host_read = denali_indexed_read;
        denali->host_write = denali_indexed_write;
    } else {
        denali->host_read = denali_direct_read;
        denali->host_write = denali_direct_write;
    }

    /* clk rate info is needed for setup_data_interface */
    if (denali->clk_rate && denali->clk_x_rate)
        chip->setup_data_interface = denali_setup_data_interface;

    chip->dummy_controller.ops = &denali_controller_ops;
    ret = nand_scan(chip, denali->max_banks);
    if (ret)
        goto disable_irq;

    ret = mtd_device_register(mtd, NULL, 0);
    if (ret) {
        dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
        goto cleanup_nand;
    }

    return 0;

cleanup_nand:
    nand_cleanup(chip);
disable_irq:
    denali_disable_irq(denali);

    return ret;
}
EXPORT_SYMBOL(denali_init);

void denali_remove(struct denali_nand_info *denali)
{
    nand_release(&denali->nand);
    denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);