denali.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2014 Panasonic Corporation
  4. * Copyright (C) 2013-2014, Altera Corporation <www.altera.com>
  5. * Copyright (C) 2009-2010, Intel Corporation and its suppliers.
  6. */
  7. #include <dm.h>
  8. #include <nand.h>
  9. #include <linux/bitfield.h>
  10. #include <linux/dma-direction.h>
  11. #include <linux/errno.h>
  12. #include <linux/io.h>
  13. #include <linux/mtd/mtd.h>
  14. #include <linux/mtd/rawnand.h>
  15. #include "denali.h"
  16. static dma_addr_t dma_map_single(void *dev, void *ptr, size_t size,
  17. enum dma_data_direction dir)
  18. {
  19. unsigned long addr = (unsigned long)ptr;
  20. if (dir == DMA_FROM_DEVICE)
  21. invalidate_dcache_range(addr, addr + size);
  22. else
  23. flush_dcache_range(addr, addr + size);
  24. return addr;
  25. }
  26. static void dma_unmap_single(void *dev, dma_addr_t addr, size_t size,
  27. enum dma_data_direction dir)
  28. {
  29. if (dir != DMA_TO_DEVICE)
  30. invalidate_dcache_range(addr, addr + size);
  31. }
/*
 * With the 1:1 address mapping above, creating a DMA mapping can never
 * fail; always report success.
 */
static int dma_mapping_error(void *dev, dma_addr_t addr)
{
	return 0;
}
  36. #define DENALI_NAND_NAME "denali-nand"
  37. /* for Indexed Addressing */
  38. #define DENALI_INDEXED_CTRL 0x00
  39. #define DENALI_INDEXED_DATA 0x10
  40. #define DENALI_MAP00 (0 << 26) /* direct access to buffer */
  41. #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
  42. #define DENALI_MAP10 (2 << 26) /* high-level control plane */
  43. #define DENALI_MAP11 (3 << 26) /* direct controller access */
  44. /* MAP11 access cycle type */
  45. #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
  46. #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
  47. #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
  48. /* MAP10 commands */
  49. #define DENALI_ERASE 0x01
  50. #define DENALI_BANK(denali) ((denali)->active_bank << 24)
  51. #define DENALI_INVALID_BANK -1
  52. #define DENALI_NR_BANKS 4
  53. /*
  54. * The bus interface clock, clk_x, is phase aligned with the core clock. The
  55. * clk_x is an integral multiple N of the core clk. The value N is configured
  56. * at IP delivery time, and its available value is 4, 5, or 6. We need to align
  57. * to the largest value to make it work with any possible configuration.
  58. */
  59. #define DENALI_CLK_X_MULT 6
  60. static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
  61. {
  62. return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
  63. }
  64. /*
  65. * Direct Addressing - the slave address forms the control information (command
  66. * type, bank, block, and page address). The slave data is the actual data to
  67. * be transferred. This mode requires 28 bits of address region allocated.
  68. */
  69. static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
  70. {
  71. return ioread32(denali->host + addr);
  72. }
  73. static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
  74. u32 data)
  75. {
  76. iowrite32(data, denali->host + addr);
  77. }
  78. /*
  79. * Indexed Addressing - address translation module intervenes in passing the
  80. * control information. This mode reduces the required address range. The
  81. * control information and transferred data are latched by the registers in
  82. * the translation module.
  83. */
  84. static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
  85. {
  86. iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
  87. return ioread32(denali->host + DENALI_INDEXED_DATA);
  88. }
  89. static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
  90. u32 data)
  91. {
  92. iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
  93. iowrite32(data, denali->host + DENALI_INDEXED_DATA);
  94. }
  95. /*
  96. * Use the configuration feature register to determine the maximum number of
  97. * banks that the hardware supports.
  98. */
  99. static void denali_detect_max_banks(struct denali_nand_info *denali)
  100. {
  101. uint32_t features = ioread32(denali->reg + FEATURES);
  102. denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
  103. /* the encoding changed from rev 5.0 to 5.1 */
  104. if (denali->revision < 0x0501)
  105. denali->max_banks <<= 1;
  106. }
  107. static void __maybe_unused denali_enable_irq(struct denali_nand_info *denali)
  108. {
  109. int i;
  110. for (i = 0; i < DENALI_NR_BANKS; i++)
  111. iowrite32(U32_MAX, denali->reg + INTR_EN(i));
  112. iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
  113. }
  114. static void __maybe_unused denali_disable_irq(struct denali_nand_info *denali)
  115. {
  116. int i;
  117. for (i = 0; i < DENALI_NR_BANKS; i++)
  118. iowrite32(0, denali->reg + INTR_EN(i));
  119. iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
  120. }
/*
 * Acknowledge the given interrupt bits on one bank.  The INTR_STATUS
 * register is write-one-to-clear.
 */
static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
  127. static void denali_clear_irq_all(struct denali_nand_info *denali)
  128. {
  129. int i;
  130. for (i = 0; i < DENALI_NR_BANKS; i++)
  131. denali_clear_irq(denali, i, U32_MAX);
  132. }
  133. static void __denali_check_irq(struct denali_nand_info *denali)
  134. {
  135. uint32_t irq_status;
  136. int i;
  137. for (i = 0; i < DENALI_NR_BANKS; i++) {
  138. irq_status = ioread32(denali->reg + INTR_STATUS(i));
  139. denali_clear_irq(denali, i, irq_status);
  140. if (i != denali->active_bank)
  141. continue;
  142. denali->irq_status |= irq_status;
  143. }
  144. }
  145. static void denali_reset_irq(struct denali_nand_info *denali)
  146. {
  147. denali->irq_status = 0;
  148. denali->irq_mask = 0;
  149. }
  150. static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
  151. uint32_t irq_mask)
  152. {
  153. unsigned long time_left = 1000000;
  154. while (time_left) {
  155. __denali_check_irq(denali);
  156. if (irq_mask & denali->irq_status)
  157. return denali->irq_status;
  158. udelay(1);
  159. time_left--;
  160. }
  161. if (!time_left) {
  162. dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
  163. irq_mask);
  164. return 0;
  165. }
  166. return denali->irq_status;
  167. }
  168. static uint32_t denali_check_irq(struct denali_nand_info *denali)
  169. {
  170. __denali_check_irq(denali);
  171. return denali->irq_status;
  172. }
  173. static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  174. {
  175. struct denali_nand_info *denali = mtd_to_denali(mtd);
  176. u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
  177. int i;
  178. for (i = 0; i < len; i++)
  179. buf[i] = denali->host_read(denali, addr);
  180. }
  181. static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
  182. {
  183. struct denali_nand_info *denali = mtd_to_denali(mtd);
  184. u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
  185. int i;
  186. for (i = 0; i < len; i++)
  187. denali->host_write(denali, addr, buf[i]);
  188. }
  189. static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
  190. {
  191. struct denali_nand_info *denali = mtd_to_denali(mtd);
  192. u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
  193. uint16_t *buf16 = (uint16_t *)buf;
  194. int i;
  195. for (i = 0; i < len / 2; i++)
  196. buf16[i] = denali->host_read(denali, addr);
  197. }
  198. static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
  199. int len)
  200. {
  201. struct denali_nand_info *denali = mtd_to_denali(mtd);
  202. u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
  203. const uint16_t *buf16 = (const uint16_t *)buf;
  204. int i;
  205. for (i = 0; i < len / 2; i++)
  206. denali->host_write(denali, addr, buf16[i]);
  207. }
  208. static uint8_t denali_read_byte(struct mtd_info *mtd)
  209. {
  210. uint8_t byte;
  211. denali_read_buf(mtd, &byte, 1);
  212. return byte;
  213. }
  214. static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
  215. {
  216. denali_write_buf(mtd, &byte, 1);
  217. }
  218. static uint16_t denali_read_word(struct mtd_info *mtd)
  219. {
  220. uint16_t word;
  221. denali_read_buf16(mtd, (uint8_t *)&word, 2);
  222. return word;
  223. }
  224. static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
  225. {
  226. struct denali_nand_info *denali = mtd_to_denali(mtd);
  227. uint32_t type;
  228. if (ctrl & NAND_CLE)
  229. type = DENALI_MAP11_CMD;
  230. else if (ctrl & NAND_ALE)
  231. type = DENALI_MAP11_ADDR;
  232. else
  233. return;
  234. /*
  235. * Some commands are followed by chip->dev_ready or chip->waitfunc.
  236. * irq_status must be cleared here to catch the R/B# interrupt later.
  237. */
  238. if (ctrl & NAND_CTRL_CHANGE)
  239. denali_reset_irq(denali);
  240. denali->host_write(denali, DENALI_BANK(denali) | type, dat);
  241. }
  242. static int denali_dev_ready(struct mtd_info *mtd)
  243. {
  244. struct denali_nand_info *denali = mtd_to_denali(mtd);
  245. return !!(denali_check_irq(denali) & INTR__INT_ACT);
  246. }
  247. static int denali_check_erased_page(struct mtd_info *mtd,
  248. struct nand_chip *chip, uint8_t *buf,
  249. unsigned long uncor_ecc_flags,
  250. unsigned int max_bitflips)
  251. {
  252. uint8_t *ecc_code = chip->buffers->ecccode;
  253. int ecc_steps = chip->ecc.steps;
  254. int ecc_size = chip->ecc.size;
  255. int ecc_bytes = chip->ecc.bytes;
  256. int i, ret, stat;
  257. ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
  258. chip->ecc.total);
  259. if (ret)
  260. return ret;
  261. for (i = 0; i < ecc_steps; i++) {
  262. if (!(uncor_ecc_flags & BIT(i)))
  263. continue;
  264. stat = nand_check_erased_ecc_chunk(buf, ecc_size,
  265. ecc_code, ecc_bytes,
  266. NULL, 0,
  267. chip->ecc.strength);
  268. if (stat < 0) {
  269. mtd->ecc_stats.failed++;
  270. } else {
  271. mtd->ecc_stats.corrected += stat;
  272. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  273. }
  274. buf += ecc_size;
  275. ecc_code += ecc_bytes;
  276. }
  277. return max_bitflips;
  278. }
/*
 * Read back the result of the hardware ECC fixup engine for the page just
 * transferred on the active bank.
 *
 * On an uncorrectable error, every ECC sector bit is set in
 * @uncor_ecc_flags (the hardware cannot say which sector failed) so the
 * caller can run the erased-page check, and 0 is returned.  Otherwise the
 * per-sector maximum of corrected bitflips is returned.
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector. We can not know "how many sectors", or
		 * "which sector(s)". We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
/*
 * Walk the controller's ECC error report FIFO and fix the reported bit
 * errors directly in @buf.  Sectors reported as uncorrectable are
 * recorded in @uncor_ecc_flags for the later erased-page check.
 *
 * Returns the maximum number of bitflips seen in any single ECC sector,
 * or -EIO if the ECC_TRANSACTION_DONE interrupt never arrives.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		/* each read pops one entry from the error report FIFO */
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, which we do not need to
			 * correct.  err_device identifies which NAND device
			 * reported the error when more than one device is
			 * connected per chip select.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once handle all ECC errors, controller will trigger an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
/*
 * Program the controller for a DMA transfer of one page using the 64-bit
 * DMA descriptor form: three MAP10 writes to the same command address.
 * @write: 1 for memory -> NAND, 0 for NAND -> memory.
 */
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
/*
 * Program the controller for a DMA transfer of one page using the legacy
 * 32-bit form: the DMA address is smuggled piecewise inside the MAP10
 * command addresses (four command writes in total).
 * @write: 1 for memory -> NAND, 0 for NAND -> memory.
 */
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
/*
 * Read @size bytes of page @page into @buf over the MAP01 PIO interface.
 *
 * Returns 0 on success, -EIO if the transfer never completes, or -EBADMSG
 * on an ECC error.  If the controller flags the page as erased, the buffer
 * is forced to all-0xff first.
 *
 * NOTE(review): assumes @size is a multiple of 4 — holds for the page and
 * page+OOB sizes this driver passes in.
 */
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	/* with HW fixup enabled only uncorrectable errors are fatal */
	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
  425. static int denali_pio_write(struct denali_nand_info *denali,
  426. const void *buf, size_t size, int page, int raw)
  427. {
  428. u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
  429. const uint32_t *buf32 = (uint32_t *)buf;
  430. uint32_t irq_status;
  431. int i;
  432. denali_reset_irq(denali);
  433. for (i = 0; i < size / 4; i++)
  434. denali->host_write(denali, addr, *buf32++);
  435. irq_status = denali_wait_for_irq(denali,
  436. INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
  437. if (!(irq_status & INTR__PROGRAM_COMP))
  438. return -EIO;
  439. return 0;
  440. }
  441. static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
  442. size_t size, int page, int raw, int write)
  443. {
  444. if (write)
  445. return denali_pio_write(denali, buf, size, page, raw);
  446. else
  447. return denali_pio_read(denali, buf, size, page, raw);
  448. }
/*
 * Transfer one page (@size bytes) by DMA; falls back to PIO if the buffer
 * cannot be DMA-mapped.
 *
 * Returns 0 on success, -EIO if the DMA command never completes, or
 * -EBADMSG on an ECC error.  An erased page forces @buf to all-0xff.
 */
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	/* the DMA engine must be enabled before the descriptor is written */
	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	/* disable the engine again before unmapping the buffer */
	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
  490. static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
  491. size_t size, int page, int raw, int write)
  492. {
  493. iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
  494. iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
  495. denali->reg + TRANSFER_SPARE_REG);
  496. if (denali->dma_avail)
  497. return denali_dma_xfer(denali, buf, size, page, raw, write);
  498. else
  499. return denali_pio_xfer(denali, buf, size, page, raw, write);
  500. }
/*
 * Transfer only the OOB bytes of @page, in either direction, using the
 * NAND core's command/read/write hooks.
 *
 * The controller interleaves per-sector payload and ECC in the main area
 * and reserves denali->oob_skip_bytes (BBM area) at the start of the
 * spare area, so the OOB bytes are scattered: BBM first, then the ECC
 * chunks (which may straddle the main/spare boundary), then the free OOB
 * tail.  Random-in/out commands are used to hop between these regions.
 */
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
	unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	chip->cmdfunc(mtd, start_cmd, writesize, page);
	if (write)
		chip->write_buf(mtd, bufpoi, oob_skip);
	else
		chip->read_buf(mtd, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		/* ECC chunks past the main area shift over the BBM bytes */
		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		chip->cmdfunc(mtd, rnd_cmd, pos, -1);
		if (write)
			chip->write_buf(mtd, bufpoi, len);
		else
			chip->read_buf(mtd, bufpoi, len);
		bufpoi += len;

		/* handle a chunk that straddles the main/spare boundary */
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
			if (write)
				chip->write_buf(mtd, bufpoi, len);
			else
				chip->read_buf(mtd, bufpoi, len);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
	if (write)
		chip->write_buf(mtd, bufpoi, len);
	else
		chip->read_buf(mtd, bufpoi, len);
}
/*
 * Raw page read (->read_page_raw hook).
 *
 * The whole page + spare is read with ECC disabled into denali->buf,
 * which holds the controller's layout: per-sector payload and ECC
 * interleaved, with denali->oob_skip_bytes (BBM) at the start of the
 * spare area.  The data is then de-interleaved into the caller's linear
 * @buf / chip->oob_poi view.
 *
 * Returns 0 on success or a negative errno from the transfer.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* payload past the main area shifts over the BBM */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			/* chunk straddling the main/spare boundary */
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			/* chunk straddling the main/spare boundary */
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}
  618. static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
  619. int page)
  620. {
  621. denali_oob_xfer(mtd, chip, page, 0);
  622. return 0;
  623. }
  624. static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
  625. int page)
  626. {
  627. struct denali_nand_info *denali = mtd_to_denali(mtd);
  628. int status;
  629. denali_reset_irq(denali);
  630. denali_oob_xfer(mtd, chip, page, 1);
  631. chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
  632. status = chip->waitfunc(mtd, chip);
  633. return status & NAND_STATUS_FAIL ? -EIO : 0;
  634. }
/*
 * ECC-protected page read (->read_page hook).
 *
 * Transfers the main area with the ECC engine enabled, then applies
 * either the hardware fixup report or the software error-FIFO fixup.
 * Sectors flagged uncorrectable are re-checked against the erased-page
 * pattern before being counted as failures.
 *
 * Returns the maximum bitflip count on success, or a negative errno.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* fetch the OOB so the erased-page check sees the raw ECC */
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
/*
 * Raw page write (->write_page_raw hook).
 *
 * Mirror of denali_read_page_raw(): the caller's linear payload/OOB view
 * is re-arranged in denali->buf into the controller's layout (per-sector
 * payload and ECC interleaved in the main area, denali->oob_skip_bytes of
 * BBM at the start of the spare area), then the whole page + spare is
 * written with ECC disabled.
 *
 * Returns the result of the underlying data transfer.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* payload past the main area shifts over the BBM */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			/* chunk straddling the main/spare boundary */
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			/* chunk straddling the main/spare boundary */
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
  726. static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  727. const uint8_t *buf, int oob_required, int page)
  728. {
  729. struct denali_nand_info *denali = mtd_to_denali(mtd);
  730. return denali_data_xfer(denali, (void *)buf, mtd->writesize,
  731. page, 0, 1);
  732. }
  733. static void denali_select_chip(struct mtd_info *mtd, int chip)
  734. {
  735. struct denali_nand_info *denali = mtd_to_denali(mtd);
  736. denali->active_bank = chip;
  737. }
  738. static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
  739. {
  740. struct denali_nand_info *denali = mtd_to_denali(mtd);
  741. uint32_t irq_status;
  742. /* R/B# pin transitioned from low to high? */
  743. irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
  744. return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
  745. }
  746. static int denali_erase(struct mtd_info *mtd, int page)
  747. {
  748. struct denali_nand_info *denali = mtd_to_denali(mtd);
  749. uint32_t irq_status;
  750. denali_reset_irq(denali);
  751. denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
  752. DENALI_ERASE);
  753. /* wait for erase to complete or failure to occur */
  754. irq_status = denali_wait_for_irq(denali,
  755. INTR__ERASE_COMP | INTR__ERASE_FAIL);
  756. return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
  757. }
/*
 * Translate the generic SDR NAND timings in @conf into the Denali
 * controller's clock-count registers.
 *
 * Every timing register is updated read-modify-write so unrelated fields
 * sharing the same register are preserved.  Clock counts are rounded up
 * (DIV_ROUND_UP) so the programmed delay is never shorter than the chip
 * requires, then clamped to the register field width.
 *
 * Return: 0 on success, -EINVAL if the controller clock rate is invalid,
 * or a PTR_ERR value if @conf carries no SDR timings.
 */
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_clk;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_clk)
		return -EINVAL;

	/* check-only pass: validate timings without touching the hardware */
	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
			       t_clk);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */
	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;
	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_clk);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/*
	 * tRP, tWP -> RDWR_EN_LO_CNT
	 *
	 * The low phase must also be long enough that low + high covers a
	 * full read/write cycle (tRC/tWC), hence the rdwr_en_lo_hi term.
	 */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
				  t_clk);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_clk);
	rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}
/*
 * Issue a RESET to each bank in turn and count how many chips respond.
 *
 * A bank whose reset does not raise INTR__INT_ACT is assumed to have no
 * chip attached; the scan stops there and max_banks is shrunk to the
 * number of chips actually found.
 */
static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->active_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}
/*
 * One-time controller setup performed before the NAND chip is scanned:
 * latch the IP revision, program the OOB skip count, detect the number
 * of banks, and enable the R/B# and chip-enable behavior the driver
 * relies on.
 */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * tell driver how many bits controller will skip before writing
	 * ECC code in OOB. This is normally used for the bad block marker.
	 */
	denali->oob_skip_bytes = CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES;
	iowrite32(denali->oob_skip_bytes, denali->reg + SPARE_AREA_SKIP_BYTES);
	denali_detect_max_banks(denali);
	/* enable R/B# monitoring on all four possible banks */
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}
  889. int denali_calc_ecc_bytes(int step_size, int strength)
  890. {
  891. /* BCH code. Denali requires ecc.bytes to be multiple of 2 */
  892. return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
  893. }
  894. EXPORT_SYMBOL(denali_calc_ecc_bytes);
  895. static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
  896. struct denali_nand_info *denali)
  897. {
  898. int oobavail = mtd->oobsize - denali->oob_skip_bytes;
  899. int ret;
  900. /*
  901. * If .size and .strength are already set (usually by DT),
  902. * check if they are supported by this controller.
  903. */
  904. if (chip->ecc.size && chip->ecc.strength)
  905. return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
  906. /*
  907. * We want .size and .strength closest to the chip's requirement
  908. * unless NAND_ECC_MAXIMIZE is requested.
  909. */
  910. if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
  911. ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
  912. if (!ret)
  913. return 0;
  914. }
  915. /* Max ECC strength is the last thing we can do */
  916. return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
  917. }
/* Legacy ecclayout; only .eccbytes is filled in, during denali_init() */
static struct nand_ecclayout nand_oob;
  919. static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
  920. struct mtd_oob_region *oobregion)
  921. {
  922. struct denali_nand_info *denali = mtd_to_denali(mtd);
  923. struct nand_chip *chip = mtd_to_nand(mtd);
  924. if (section)
  925. return -ERANGE;
  926. oobregion->offset = denali->oob_skip_bytes;
  927. oobregion->length = chip->ecc.total;
  928. return 0;
  929. }
  930. static int denali_ooblayout_free(struct mtd_info *mtd, int section,
  931. struct mtd_oob_region *oobregion)
  932. {
  933. struct denali_nand_info *denali = mtd_to_denali(mtd);
  934. struct nand_chip *chip = mtd_to_nand(mtd);
  935. if (section)
  936. return -ERANGE;
  937. oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
  938. oobregion->length = mtd->oobsize - oobregion->offset;
  939. return 0;
  940. }
/* OOB layout [BBM][ECC][free], exposed through the mtd_ooblayout API */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
/*
 * Adjust the logical geometry when two x8 chips are wired in parallel on
 * one chip select (DEVICES_CONNECTED == 2): every size, shift and ECC
 * parameter the core framework sees must be doubled.
 *
 * Return: 0 on success, -EINVAL for an unsupported DEVICES_CONNECTED
 * value (anything other than 1 or 2).
 */
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
/*
 * Probe-time initialization: bring up the controller, identify the
 * attached chip(s), choose and program the ECC configuration, and
 * register the resulting MTD device.
 *
 * Return: 0 on success or a negative error code.
 */
int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	u32 features = ioread32(denali->reg + FEATURES);
	int ret;

	denali_hw_init(denali);
	denali_clear_irq_all(denali);

	denali_reset_banks(denali);

	denali->active_bank = DENALI_INVALID_BANK;

	chip->flash_node = dev_of_offset(denali->dev);
	/* Fallback to the default name if DT did not give "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	/* pick indexed or direct host register access per IP configuration */
	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	ret = nand_scan_ident(mtd, denali->max_banks, NULL);
	if (ret)
		return ret;

	if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		chip->buf_align = 16;
		if (denali->caps & DENALI_CAP_DMA_64BIT)
			denali->setup_dma = denali_setup_dma64;
		else
			denali->setup_dma = denali_setup_dma32;
	} else {
		chip->buf_align = 4;
	}

	chip->options |= NAND_USE_BOUNCE_BUFFER;
	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->bbt_options |= NAND_BBT_NO_OOB;
	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;

	/* no subpage writes on denali */
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = denali_ecc_setup(mtd, chip, denali);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		return ret;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	/* program the chosen geometry and ECC strength into the controller */
	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	/* fill in the legacy layout; only eccbytes is consumed */
	nand_oob.eccbytes = denali->nand.ecc.bytes;
	denali->nand.ecc.layout = &nand_oob;

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		return ret;

	/*
	 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
	 * use devm_kmalloc() because the memory allocated by devm_ does not
	 * guarantee DMA-safe alignment.
	 */
	denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!denali->buf)
		return -ENOMEM;

	ret = nand_scan_tail(mtd);
	if (ret)
		goto free_buf;

	ret = nand_register(0, mtd);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto free_buf;
	}

	return 0;

free_buf:
	kfree(denali->buf);

	return ret;
}