// SPDX-License-Identifier: GPL-2.0-only
/* vmu-flash.c
 * Driver for SEGA Dreamcast Visual Memory Unit
 *
 * Copyright (c) Adrian McMenamin 2002 - 2009
 * Copyright (c) Paul Mundt 2001
 */
  8. #include <linux/init.h>
  9. #include <linux/slab.h>
  10. #include <linux/sched.h>
  11. #include <linux/delay.h>
  12. #include <linux/maple.h>
  13. #include <linux/mtd/mtd.h>
  14. #include <linux/mtd/map.h>
/* Single-block read cache, one per partition. */
struct vmu_cache {
	unsigned char *buffer;		/* Cache */
	unsigned int block;		/* Which block was cached */
	unsigned long jiffies_atc;	/* When was it cached? */
	int valid;			/* Non-zero while the cached block may be used */
};
/* Stored in mtd_info->priv: ties an MTD partition back to its device. */
struct mdev_part {
	struct maple_device *mdev;	/* Owning maple bus device */
	int partition;			/* Partition index on that device */
};
/* Per-partition geometry and state, filled in from the GETMINFO reply. */
struct vmupart {
	u16 user_blocks;	/* Number of user-accessible blocks */
	u16 root_block;		/* Block number of the root block */
	u16 numblocks;		/* Total block count (root_block + 1) */
	char *name;		/* MTD device name, "vmu<port>.<unit>.<part>" */
	struct vmu_cache *pcache;	/* Single-block read cache */
};
/* Per-device state for one VMU memory card (maple drvdata). */
struct memcard {
	u16 tempA;		/* Scratch: user block count from last query */
	u16 tempB;		/* Scratch: root block number from last query */
	u32 partitions;		/* Number of partitions on the card */
	u32 blocklen;		/* Block length in bytes */
	u32 writecnt;		/* Phases a block write is split into */
	u32 readcnt;		/* Phases a block read is split into */
	u32 removable;		/* Removable-media flag from device info */
	int partition;		/* Partition currently being queried/set up */
	int read;		/* NOTE(review): apparently unused in this file */
	unsigned char *blockread;	/* Destination of an in-flight read phase */
	struct vmupart *parts;	/* Array of per-partition info */
	struct mtd_info *mtd;	/* Array of per-partition mtd_info */
};
/* A linear byte offset decomposed into (block number, offset in block). */
struct vmu_block {
	unsigned int num;	/* block number */
	unsigned int ofs;	/* block offset */
};
  50. static struct vmu_block *ofs_to_block(unsigned long src_ofs,
  51. struct mtd_info *mtd, int partition)
  52. {
  53. struct vmu_block *vblock;
  54. struct maple_device *mdev;
  55. struct memcard *card;
  56. struct mdev_part *mpart;
  57. int num;
  58. mpart = mtd->priv;
  59. mdev = mpart->mdev;
  60. card = maple_get_drvdata(mdev);
  61. if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
  62. goto failed;
  63. num = src_ofs / card->blocklen;
  64. if (num > card->parts[partition].numblocks)
  65. goto failed;
  66. vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
  67. if (!vblock)
  68. goto failed;
  69. vblock->num = num;
  70. vblock->ofs = src_ofs % card->blocklen;
  71. return vblock;
  72. failed:
  73. return NULL;
  74. }
  75. /* Maple bus callback function for reads */
  76. static void vmu_blockread(struct mapleq *mq)
  77. {
  78. struct maple_device *mdev;
  79. struct memcard *card;
  80. mdev = mq->dev;
  81. card = maple_get_drvdata(mdev);
  82. /* copy the read in data */
  83. if (unlikely(!card->blockread))
  84. return;
  85. memcpy(card->blockread, mq->recvbuf->buf + 12,
  86. card->blocklen/card->readcnt);
  87. }
/* Interface with maple bus to read blocks
 * caching the results so that other parts
 * of the driver can access block reads
 *
 * Reads block 'num' of the partition behind 'mtd' into 'buf' and into
 * the partition's single-block cache.  Returns 0 on success or a
 * negative errno (-ENOMEM, -EAGAIN, -ENXIO on unplug, -EIO on
 * timeout/interrupt). */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error = 0, x, wait;
	unsigned char *blockread = NULL;
	struct vmu_cache *pcache;
	__be32 sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* Invalidate the cache up front; it is refilled phase by phase
	 * below and re-marked valid as phases complete */
	pcache = card->parts[partition].pcache;
	pcache->valid = 0;

	/* prepare the cache for this block */
	if (!pcache->buffer) {
		pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
		if (!pcache->buffer) {
			dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
				" to lack of memory\n", mdev->port,
				mdev->unit);
			error = -ENOMEM;
			goto outB;
		}
	}

	/*
	 * Reads may be phased - again the hardware spec
	 * supports this - though may not be any devices in
	 * the wild that implement it, but we will here
	 */
	for (x = 0; x < card->readcnt; x++) {
		/* Request word: partition | read phase | block number */
		sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);

		/* Wait (up to 1s) for any earlier command to drain */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				dev_notice(&mdev->dev, "VMU at (%d, %d)"
					" is busy\n", mdev->port, mdev->unit);
				error = -EAGAIN;
				goto outB;
			}
		}

		atomic_set(&mdev->busy, 1);

		/* Scratch buffer vmu_blockread() copies this phase into */
		blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
		if (!blockread) {
			error = -ENOMEM;
			atomic_set(&mdev->busy, 0);
			goto outB;
		}
		card->blockread = blockread;

		maple_getcond_callback(mdev, vmu_blockread, 0,
			MAPLE_FUNC_MEMCARD);
		error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BREAD, 2, &sendbuf);

		/* Very long timeouts seem to be needed when box is stressed */
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			(atomic_read(&mdev->busy) == 0 ||
				atomic_read(&mdev->busy) == 2), HZ * 3);

		/*
		 * MTD layer does not handle hotplugging well
		 * so have to return errors when VMU is unplugged
		 * in the middle of a read (busy == 2)
		 */
		if (error || atomic_read(&mdev->busy) == 2) {
			if (atomic_read(&mdev->busy) == 2)
				error = -ENXIO;
			atomic_set(&mdev->busy, 0);
			card->blockread = NULL;
			goto outA;
		}
		if (wait == 0 || wait == -ERESTARTSYS) {
			/* Timed out or interrupted: reclaim the queued
			 * packet and its send buffer before bailing */
			card->blockread = NULL;
			atomic_set(&mdev->busy, 0);
			error = -EIO;
			list_del_init(&(mdev->mq->list));
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			if (wait == -ERESTARTSYS) {
				dev_warn(&mdev->dev, "VMU read on (%d, %d)"
					" interrupted on block 0x%X\n",
					mdev->port, mdev->unit, num);
			} else
				dev_notice(&mdev->dev, "VMU read on (%d, %d)"
					" timed out on block 0x%X\n",
					mdev->port, mdev->unit, num);
			goto outA;
		}

		/* Copy this phase into the caller's buffer ... */
		memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
			card->blocklen/card->readcnt);
		/* ... and into the partition's single-block cache */
		memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
			card->blockread, card->blocklen/card->readcnt);
		card->blockread = NULL;
		pcache->block = num;
		pcache->jiffies_atc = jiffies;
		pcache->valid = 1;
		kfree(blockread);
	}

	return error;

outA:
	kfree(blockread);
outB:
	return error;
}
/* communicate with maple bus for phased writing
 *
 * Writes one whole block from 'buf' to block 'num' of the partition
 * behind 'mtd', split into card->writecnt phases.  Returns the block
 * length on success, or a negative errno on failure. */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error, locking, x, phaselen, wait;
	__be32 *sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* Payload bytes per phase; each packet carries a 4-byte header
	 * word followed by one phase of data */
	phaselen = card->blocklen/card->writecnt;

	sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
	if (!sendbuf) {
		error = -ENOMEM;
		goto fail_nosendbuf;
	}
	for (x = 0; x < card->writecnt; x++) {
		/* Header word: partition | write phase | block number */
		sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
		memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
		/* wait until the device is not busy doing something else
		 * or 1 second - which ever is longer */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				error = -EBUSY;
				dev_notice(&mdev->dev, "VMU write at (%d, %d)"
					"failed - device is busy\n",
					mdev->port, mdev->unit);
				goto fail_nolock;
			}
		}

		atomic_set(&mdev->busy, 1);
		locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ/10);
		if (locking) {
			/* Packet could not be queued at all */
			error = -EIO;
			atomic_set(&mdev->busy, 0);
			goto fail_nolock;
		}
		if (atomic_read(&mdev->busy) == 2) {
			/* Device unplugged mid-write: just release it.
			 * NOTE(review): no error is returned for this phase
			 * - confirm that is intentional */
			atomic_set(&mdev->busy, 0);
		} else if (wait == 0 || wait == -ERESTARTSYS) {
			/* Timed out or interrupted: reclaim the queued
			 * packet and free its send buffer */
			error = -EIO;
			dev_warn(&mdev->dev, "Write at (%d, %d) of block"
				" 0x%X at phase %d failed: could not"
				" communicate with VMU", mdev->port,
				mdev->unit, num, x);
			atomic_set(&mdev->busy, 0);
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			list_del_init(&(mdev->mq->list));
			goto fail_nolock;
		}
	}
	kfree(sendbuf);

	/* Success: report the whole block as written */
	return card->blocklen;

fail_nolock:
	kfree(sendbuf);
fail_nosendbuf:
	dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
		mdev->unit);
	return error;
}
/* mtd function to simulate reading byte by byte
 *
 * Reads the whole enclosing block (which also warms the partition
 * cache via maple_vmu_read_block) and returns the byte at ofs.  On
 * failure *retval is set non-zero: 1 = out of memory, 2 = block read
 * failed, 3 = offset outside the partition.
 *
 * NOTE(review): ret is an unsigned char, so the negative errno values
 * assigned below are truncated; callers must rely on *retval, not the
 * returned byte, to detect failure.
 */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
	struct mtd_info *mtd)
{
	struct vmu_block *vblock;
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	unsigned char *buf, ret;
	int partition, error;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	*retval = 0;

	buf = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buf) {
		*retval = 1;
		ret = -ENOMEM;
		goto finish;
	}

	vblock = ofs_to_block(ofs, mtd, partition);
	if (!vblock) {
		*retval = 3;
		ret = -ENOMEM;
		goto out_buf;
	}

	error = maple_vmu_read_block(vblock->num, buf, mtd);
	if (error) {
		ret = error;
		*retval = 2;
		goto out_vblock;
	}

	ret = buf[vblock->ofs];

out_vblock:
	kfree(vblock);
out_buf:
	kfree(buf);
finish:
	return ret;
}
  306. /* mtd higher order function to read flash */
  307. static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
  308. size_t *retlen, u_char *buf)
  309. {
  310. struct maple_device *mdev;
  311. struct memcard *card;
  312. struct mdev_part *mpart;
  313. struct vmu_cache *pcache;
  314. struct vmu_block *vblock;
  315. int index = 0, retval, partition, leftover, numblocks;
  316. unsigned char cx;
  317. mpart = mtd->priv;
  318. mdev = mpart->mdev;
  319. partition = mpart->partition;
  320. card = maple_get_drvdata(mdev);
  321. numblocks = card->parts[partition].numblocks;
  322. if (from + len > numblocks * card->blocklen)
  323. len = numblocks * card->blocklen - from;
  324. if (len == 0)
  325. return -EIO;
  326. /* Have we cached this bit already? */
  327. pcache = card->parts[partition].pcache;
  328. do {
  329. vblock = ofs_to_block(from + index, mtd, partition);
  330. if (!vblock)
  331. return -ENOMEM;
  332. /* Have we cached this and is the cache valid and timely? */
  333. if (pcache->valid &&
  334. time_before(jiffies, pcache->jiffies_atc + HZ) &&
  335. (pcache->block == vblock->num)) {
  336. /* we have cached it, so do necessary copying */
  337. leftover = card->blocklen - vblock->ofs;
  338. if (vblock->ofs + len - index < card->blocklen) {
  339. /* only a bit of this block to copy */
  340. memcpy(buf + index,
  341. pcache->buffer + vblock->ofs,
  342. len - index);
  343. index = len;
  344. } else {
  345. /* otherwise copy remainder of whole block */
  346. memcpy(buf + index, pcache->buffer +
  347. vblock->ofs, leftover);
  348. index += leftover;
  349. }
  350. } else {
  351. /*
  352. * Not cached so read one byte -
  353. * but cache the rest of the block
  354. */
  355. cx = vmu_flash_read_char(from + index, &retval, mtd);
  356. if (retval) {
  357. *retlen = index;
  358. kfree(vblock);
  359. return cx;
  360. }
  361. memset(buf + index, cx, 1);
  362. index++;
  363. }
  364. kfree(vblock);
  365. } while (len > index);
  366. *retlen = index;
  367. return 0;
  368. }
/* mtd write: read-modify-write each affected block in turn.
 * Returns 0 with *retlen set, or a negative errno. */
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct maple_device *mdev;
	struct memcard *card;
	struct mdev_part *mpart;
	int index = 0, partition, error = 0, numblocks;
	struct vmu_cache *pcache;
	struct vmu_block *vblock;
	unsigned char *buffer;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	/* Clamp the request to the partition size */
	numblocks = card->parts[partition].numblocks;
	if (to + len > numblocks * card->blocklen)
		len = numblocks * card->blocklen - to;
	if (len == 0) {
		error = -EIO;
		goto failed;
	}

	vblock = ofs_to_block(to, mtd, partition);
	if (!vblock) {
		error = -ENOMEM;
		goto failed;
	}

	/* Scratch block image for the read-modify-write cycle */
	buffer = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto fail_buffer;
	}

	do {
		/* Read in the block we are to write to */
		error = maple_vmu_read_block(vblock->num, buffer, mtd);
		if (error)
			goto fail_io;

		/* Overlay the caller's bytes onto the block image */
		do {
			buffer[vblock->ofs] = buf[index];
			vblock->ofs++;
			index++;
			if (index >= len)
				break;
		} while (vblock->ofs < card->blocklen);

		/* write out new buffer */
		error = maple_vmu_write_block(vblock->num, buffer, mtd);
		/* invalidate the cache */
		pcache = card->parts[partition].pcache;
		pcache->valid = 0;

		/* maple_vmu_write_block() returns blocklen on success */
		if (error != card->blocklen)
			goto fail_io;

		/* Continue at the start of the next block */
		vblock->num++;
		vblock->ofs = 0;
	} while (len > index);

	kfree(buffer);
	*retlen = index;
	kfree(vblock);
	return 0;

fail_io:
	kfree(buffer);
fail_buffer:
	kfree(vblock);
failed:
	dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
	return error;
}
  434. static void vmu_flash_sync(struct mtd_info *mtd)
  435. {
  436. /* Do nothing here */
  437. }
/* Maple bus callback function to recursively query hardware details
 *
 * Runs once per partition as the GETMINFO reply arrives: records the
 * partition geometry, registers an MTD device for it, then re-arms
 * itself and queries the next partition (if any). */
static void vmu_queryblocks(struct mapleq *mq)
{
	struct maple_device *mdev;
	unsigned short *res;
	struct memcard *card;
	__be32 partnum;
	struct vmu_cache *pcache;
	struct mdev_part *mpart;
	struct mtd_info *mtd_cur;
	struct vmupart *part_cur;
	int error;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* Pick the user block count and root block number out of the raw
	 * reply.  NOTE(review): res[12]/res[6] are 16-bit word offsets -
	 * confirm against the Maple GETMINFO reply layout */
	res = (unsigned short *) (mq->recvbuf->buf);
	card->tempA = res[12];
	card->tempB = res[6];

	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
		"blocks with a root block at %d\n", card->partition,
		card->tempA, card->tempB);

	part_cur = &card->parts[card->partition];
	part_cur->user_blocks = card->tempA;
	part_cur->root_block = card->tempB;
	/* The root block is the highest-numbered block, so it fixes the
	 * total block count */
	part_cur->numblocks = card->tempB + 1;
	part_cur->name = kmalloc(12, GFP_KERNEL);
	if (!part_cur->name)
		goto fail_name;

	sprintf(part_cur->name, "vmu%d.%d.%d",
		mdev->port, mdev->unit, card->partition);

	/* Describe this partition to the MTD layer */
	mtd_cur = &card->mtd[card->partition];
	mtd_cur->name = part_cur->name;
	mtd_cur->type = 8;	/* NOTE(review): magic number - confirm intended mtd_type */
	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
	mtd_cur->size = part_cur->numblocks * card->blocklen;
	mtd_cur->erasesize = card->blocklen;
	mtd_cur->_write = vmu_flash_write;
	mtd_cur->_read = vmu_flash_read;
	mtd_cur->_sync = vmu_flash_sync;
	mtd_cur->writesize = card->blocklen;

	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
	if (!mpart)
		goto fail_mpart;

	mpart->mdev = mdev;
	mpart->partition = card->partition;
	mtd_cur->priv = mpart;
	mtd_cur->owner = THIS_MODULE;

	pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
	if (!pcache)
		goto fail_cache_create;
	part_cur->pcache = pcache;

	error = mtd_device_register(mtd_cur, NULL, 0);
	if (error)
		goto fail_mtd_register;

	/* Stop trapping replies unless we re-arm for another partition */
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);

	/*
	 * Set up a recursive call to the (probably theoretical)
	 * second or more partition
	 */
	if (++card->partition < card->partitions) {
		partnum = cpu_to_be32(card->partition << 24);
		maple_getcond_callback(mdev, vmu_queryblocks, 0,
			MAPLE_FUNC_MEMCARD);
		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_GETMINFO, 2, &partnum);
	}
	return;

fail_mtd_register:
	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
		"error is 0x%X\n", mdev->port, mdev->unit, error);
	/* Unwind earlier partitions too; note 'error' is reused as a
	 * loop index from here on */
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->parts)[error]).pcache);
		((card->parts)[error]).pcache = NULL;
	}
fail_cache_create:
fail_mpart:
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->mtd)[error]).priv);
		((card->mtd)[error]).priv = NULL;
	}
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);
	kfree(part_cur->name);
fail_name:
	return;
}
  524. /* Handles very basic info about the flash, queries for details */
  525. static int vmu_connect(struct maple_device *mdev)
  526. {
  527. unsigned long test_flash_data, basic_flash_data;
  528. int c, error;
  529. struct memcard *card;
  530. u32 partnum = 0;
  531. test_flash_data = be32_to_cpu(mdev->devinfo.function);
  532. /* Need to count how many bits are set - to find out which
  533. * function_data element has details of the memory card
  534. */
  535. c = hweight_long(test_flash_data);
  536. basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
  537. card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
  538. if (!card) {
  539. error = -ENOMEM;
  540. goto fail_nomem;
  541. }
  542. card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
  543. card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
  544. card->writecnt = basic_flash_data >> 12 & 0xF;
  545. card->readcnt = basic_flash_data >> 8 & 0xF;
  546. card->removable = basic_flash_data >> 7 & 1;
  547. card->partition = 0;
  548. /*
  549. * Not sure there are actually any multi-partition devices in the
  550. * real world, but the hardware supports them, so, so will we
  551. */
  552. card->parts = kmalloc_array(card->partitions, sizeof(struct vmupart),
  553. GFP_KERNEL);
  554. if (!card->parts) {
  555. error = -ENOMEM;
  556. goto fail_partitions;
  557. }
  558. card->mtd = kmalloc_array(card->partitions, sizeof(struct mtd_info),
  559. GFP_KERNEL);
  560. if (!card->mtd) {
  561. error = -ENOMEM;
  562. goto fail_mtd_info;
  563. }
  564. maple_set_drvdata(mdev, card);
  565. /*
  566. * We want to trap meminfo not get cond
  567. * so set interval to zero, but rely on maple bus
  568. * driver to pass back the results of the meminfo
  569. */
  570. maple_getcond_callback(mdev, vmu_queryblocks, 0,
  571. MAPLE_FUNC_MEMCARD);
  572. /* Make sure we are clear to go */
  573. if (atomic_read(&mdev->busy) == 1) {
  574. wait_event_interruptible_timeout(mdev->maple_wait,
  575. atomic_read(&mdev->busy) == 0, HZ);
  576. if (atomic_read(&mdev->busy) == 1) {
  577. dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
  578. mdev->port, mdev->unit);
  579. error = -EAGAIN;
  580. goto fail_device_busy;
  581. }
  582. }
  583. atomic_set(&mdev->busy, 1);
  584. /*
  585. * Set up the minfo call: vmu_queryblocks will handle
  586. * the information passed back
  587. */
  588. error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
  589. MAPLE_COMMAND_GETMINFO, 2, &partnum);
  590. if (error) {
  591. dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
  592. " error is 0x%X\n", mdev->port, mdev->unit, error);
  593. goto fail_mtd_info;
  594. }
  595. return 0;
  596. fail_device_busy:
  597. kfree(card->mtd);
  598. fail_mtd_info:
  599. kfree(card->parts);
  600. fail_partitions:
  601. kfree(card);
  602. fail_nomem:
  603. return error;
  604. }
  605. static void vmu_disconnect(struct maple_device *mdev)
  606. {
  607. struct memcard *card;
  608. struct mdev_part *mpart;
  609. int x;
  610. mdev->callback = NULL;
  611. card = maple_get_drvdata(mdev);
  612. for (x = 0; x < card->partitions; x++) {
  613. mpart = ((card->mtd)[x]).priv;
  614. mpart->mdev = NULL;
  615. mtd_device_unregister(&((card->mtd)[x]));
  616. kfree(((card->parts)[x]).name);
  617. }
  618. kfree(card->parts);
  619. kfree(card->mtd);
  620. kfree(card);
  621. }
  622. /* Callback to handle eccentricities of both mtd subsystem
  623. * and general flakyness of Dreamcast VMUs
  624. */
  625. static int vmu_can_unload(struct maple_device *mdev)
  626. {
  627. struct memcard *card;
  628. int x;
  629. struct mtd_info *mtd;
  630. card = maple_get_drvdata(mdev);
  631. for (x = 0; x < card->partitions; x++) {
  632. mtd = &((card->mtd)[x]);
  633. if (kref_read(&mtd->refcnt))
  634. return 0;
  635. }
  636. return 1;
  637. }
#define ERRSTR "VMU at (%d, %d) file error -"

/* Decode and log a file-error status word reported by the VMU.
 * Installed as mdev->fileerr_handler from probe_maple_vmu(). */
static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
	/* Second 32-bit word of the reply carries the error code */
	enum maple_file_errors error = ((int *)recvbuf)[1];

	switch (error) {
	case MAPLE_FILEERR_INVALID_PARTITION:
		dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
			mdev->port, mdev->unit);
		break;
	case MAPLE_FILEERR_PHASE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " phase error\n",
			mdev->port, mdev->unit);
		break;
	case MAPLE_FILEERR_INVALID_BLOCK:
		dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
			mdev->port, mdev->unit);
		break;
	case MAPLE_FILEERR_WRITE_ERROR:
		dev_notice(&mdev->dev, ERRSTR " write error\n",
			mdev->port, mdev->unit);
		break;
	case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
		dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
			mdev->port, mdev->unit);
		break;
	case MAPLE_FILEERR_BAD_CRC:
		dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
			mdev->port, mdev->unit);
		break;
	default:
		/* Unknown code: log it raw */
		dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
			mdev->port, mdev->unit, error);
	}
}
  672. static int probe_maple_vmu(struct device *dev)
  673. {
  674. struct maple_device *mdev = to_maple_dev(dev);
  675. struct maple_driver *mdrv = to_maple_driver(dev->driver);
  676. mdev->can_unload = vmu_can_unload;
  677. mdev->fileerr_handler = vmu_file_error;
  678. mdev->driver = mdrv;
  679. return vmu_connect(mdev);
  680. }
  681. static int remove_maple_vmu(struct device *dev)
  682. {
  683. struct maple_device *mdev = to_maple_dev(dev);
  684. vmu_disconnect(mdev);
  685. return 0;
  686. }
/* Matches any maple device advertising the memory card function. */
static struct maple_driver vmu_flash_driver = {
	.function = MAPLE_FUNC_MEMCARD,
	.drv = {
		.name = "Dreamcast_visual_memory",
		.probe = probe_maple_vmu,
		.remove = remove_maple_vmu,
	},
};
/* Module init: register the driver with the maple bus core. */
static int __init vmu_flash_map_init(void)
{
	return maple_driver_register(&vmu_flash_driver);
}

/* Module exit: unregister from the maple bus core. */
static void __exit vmu_flash_map_exit(void)
{
	maple_driver_unregister(&vmu_flash_driver);
}

module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");