/*
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>

#include <asm/mach/flash.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE	(1024 * 5)
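
/*
 * Per-device state: the GPMC chip-select and physical window, the optional
 * INT GPIO, the completions used by the interrupt and DMA paths, and the
 * embedded MTD/OneNAND core structures.
 */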
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	struct gpio_desc *int_gpiod;
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	struct dma_chan *dma_chan;
};

static void omap2_onenand_dma_complete_func(void *completion)
{
	complete(completion);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}
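
/*
 * Program the OneNAND SYS_CFG1 register: synchronous read/write modes,
 * burst length and the burst read latency field, with the HF/VHF bits
 * set for the higher latencies.
 */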
static int omap2_onenand_set_cfg(struct omap2_onenand *c,
				 bool sr, bool sw,
				 int latency, int burst_len)
{
	unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;

	reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;

	switch (burst_len) {
	case 0:		/* continuous */
		break;
	case 4:
		reg |= ONENAND_SYS_CFG1_BL_4;
		break;
	case 8:
		reg |= ONENAND_SYS_CFG1_BL_8;
		break;
	case 16:
		reg |= ONENAND_SYS_CFG1_BL_16;
		break;
	case 32:
		reg |= ONENAND_SYS_CFG1_BL_32;
		break;
	default:
		return -EINVAL;
	}

	if (latency > 5)
		reg |= ONENAND_SYS_CFG1_HF;
	if (latency > 7)
		reg |= ONENAND_SYS_CFG1_VHF;
	if (sr)
		reg |= ONENAND_SYS_CFG1_SYNC_READ;
	if (sw)
		reg |= ONENAND_SYS_CFG1_SYNC_WRITE;

	write_reg(c, reg, ONENAND_REG_SYS_CFG1);

	return 0;
}
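
/*
 * Derive the maximum synchronous clock frequency (in MHz) from bits
 * [7:4] of the OneNAND version ID register.
 */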
static int omap2_onenand_get_freq(int ver)
{
	switch ((ver >> 4) & 0xf) {
	case 0:
		return 40;
	case 1:
		return 54;
	case 2:
		return 66;
	case 3:
		return 83;
	case 4:
		return 104;
	}

	return -EINVAL;
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}
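
/*
 * Wait for the current OneNAND command to finish. Reset and erase phases
 * are first polled briefly with udelay(); other non-read states then sleep
 * on the INT GPIO interrupt, while reads poll the interrupt register with
 * the chip's interrupt output disabled. Afterwards the ECC and controller
 * status registers are checked for errors.
 */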
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			/* Add a delay to let GPIO settle */
			syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		result = gpiod_get_value(c->int_gpiod);
		if (result < 0) {
			ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			wait_err("gpio error", state, ctrl, intr);
			return result;
		} else if (result == 0) {
			int retry_cnt = 0;
retry:
			if (!wait_for_completion_io_timeout(&c->irq_done,
						msecs_to_jiffies(20))) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
			       "Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
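
/*
 * Offset of the given area within the currently selected BufferRAM bank:
 * the data and spare areas of bank 1 sit behind those of bank 0.
 */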
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}
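
/*
 * Run a single dmaengine memcpy transfer between the BufferRAM window and
 * system memory and wait up to 20 ms for it to complete. On timeout the
 * channel is terminated and -ETIMEDOUT returned so that the caller can
 * fall back to PIO.
 */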
static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
					     dma_addr_t src, dma_addr_t dst,
					     size_t count)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&c->dma_done);

	tx->callback = omap2_onenand_dma_complete_func;
	tx->callback_param = &c->dma_done;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	dma_async_issue_pending(c->dma_chan);

	if (!wait_for_completion_io_timeout(&c->dma_done,
					    msecs_to_jiffies(20))) {
		dmaengine_terminate_sync(c->dma_chan);
		return -ETIMEDOUT;
	}

	return 0;
}
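
/*
 * Copy data out of the OneNAND BufferRAM, using DMA when the buffer is
 * suitable and otherwise falling back to a plain memcpy (PIO).
 */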
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &c->pdev->dev;
	void *buf = (void *)buffer;
	dma_addr_t dma_src, dma_dst;
	int bram_offset, err;
	size_t xtra;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/*
	 * Fall back to PIO mode if the buffer address is not DMA-able, the
	 * length is too short to make a DMA transfer profitable, or we may
	 * be running in an interrupt context (e.g. panic_write()).
	 */
	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
	    count < 384 || in_interrupt() || oops_in_progress)
		goto out_copy;

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
	dma_src = c->phys_base + bram_offset;

	if (dma_mapping_error(dev, dma_dst)) {
		dev_err(dev, "Couldn't DMA map a %zu byte buffer\n", count);
		goto out_copy;
	}

	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
	if (!err)
		return 0;

	dev_err(dev, "timeout waiting for DMA\n");

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}
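
/*
 * Copy data into the OneNAND BufferRAM, with the same DMA-versus-PIO
 * decision as the read path.
 */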
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &c->pdev->dev;
	void *buf = (void *)buffer;
	dma_addr_t dma_src, dma_dst;
	int bram_offset, err;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/*
	 * Fall back to PIO mode if the buffer address is not DMA-able, the
	 * length is too short to make a DMA transfer profitable, or we may
	 * be running in an interrupt context (e.g. panic_write()).
	 */
	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
	    count < 384 || in_interrupt() || oops_in_progress)
		goto out_copy;

	dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "Couldn't DMA map a %zu byte buffer\n", count);
		goto out_copy;
	}

	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(dev, dma_src, count, DMA_TO_DEVICE);
	if (!err)
		return 0;

	dev_err(dev, "timeout waiting for DMA\n");

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
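
/*
 * Probe: map the GPMC chip-select window, optionally hook up the INT GPIO
 * interrupt and a dmaengine memcpy channel, scan the OneNAND chip, tune
 * GPMC/OneNAND timings from the chip's reported frequency, and finally
 * register the MTD device.
 */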
static int omap2_onenand_probe(struct platform_device *pdev)
{
	u32 val;
	dma_cap_mask_t mask;
	int freq, latency, r;
	struct resource *res;
	struct omap2_onenand *c;
	struct gpmc_onenand_info info;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "error getting memory resource\n");
		return -EINVAL;
	}

	r = of_property_read_u32(np, "reg", &val);
	if (r) {
		dev_err(dev, "reg not found in DT\n");
		return r;
	}

	c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = val;
	c->phys_base = res->start;

	c->onenand.base = devm_ioremap_resource(dev, res);
	if (IS_ERR(c->onenand.base))
		return PTR_ERR(c->onenand.base);

	c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
	if (IS_ERR(c->int_gpiod)) {
		r = PTR_ERR(c->int_gpiod);
		/* Just try again if this happens */
		if (r != -EPROBE_DEFER)
			dev_err(dev, "error getting gpio: %d\n", r);
		return r;
	}

	if (c->int_gpiod) {
		r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING, "onenand", c);
		if (r)
			return r;

		c->onenand.wait = omap2_onenand_wait;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	c->dma_chan = dma_request_channel(mask, NULL, NULL);
	if (c->dma_chan) {
		c->onenand.read_bufferram = omap2_onenand_read_bufferram;
		c->onenand.write_bufferram = omap2_onenand_write_bufferram;
	}

	c->pdev = pdev;
	c->mtd.priv = &c->onenand;
	c->mtd.dev.parent = dev;
	mtd_set_of_node(&c->mtd, dev->of_node);

	dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
		 c->gpmc_cs, c->phys_base, c->onenand.base,
		 c->dma_chan ? "DMA" : "PIO");

	r = onenand_scan(&c->mtd, 1);
	if (r < 0)
		goto err_release_dma;

	freq = omap2_onenand_get_freq(c->onenand.version_id);
	if (freq > 0) {
		switch (freq) {
		case 104:
			latency = 7;
			break;
		case 83:
			latency = 6;
			break;
		case 66:
			latency = 5;
			break;
		case 56:
			latency = 4;
			break;
		default:	/* 40 MHz or lower */
			latency = 3;
			break;
		}

		r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
						  freq, latency, &info);
		if (r)
			goto err_release_onenand;

		r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
					  latency, info.burst_len);
		if (r)
			goto err_release_onenand;

		if (info.sync_read || info.sync_write)
			dev_info(dev, "optimized timings for %d MHz\n", freq);
	}

	r = mtd_device_register(&c->mtd, NULL, 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);

	return r;
}

static int omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);
	omap2_onenand_shutdown(pdev);

	return 0;
}
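
/*
 * The driver binds to a GPMC child node via the compatible below. An
 * illustrative fragment (a sketch only; the CS/offset/size values and the
 * GPIO phandle are placeholders, with the exact layout defined by the GPMC
 * binding rather than by this file):
 *
 *	onenand@0,0 {
 *		compatible = "ti,omap2-onenand";
 *		reg = <0 0 0x20000>;	// first cell: GPMC chip-select
 *		int-gpios = <&gpio3 1 GPIO_ACTIVE_HIGH>;
 *	};
 */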
static const struct of_device_id omap2_onenand_id_table[] = {
	{ .compatible = "ti,omap2-onenand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);

static struct platform_driver omap2_onenand_driver = {
	.probe = omap2_onenand_probe,
	.remove = omap2_onenand_remove,
	.shutdown = omap2_onenand_shutdown,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = omap2_onenand_id_table,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");