// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the SWIM3 (Super Woz Integrated Machine 3)
 * floppy controller found on Power Macintoshes.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

/*
 * TODO:
 * handle 2 drives
 * handle GCR disks
 */

#undef DEBUG

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/major.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>

#define MAX_FLOPPIES	2

static DEFINE_MUTEX(swim3_mutex);
static struct gendisk *disks[MAX_FLOPPIES];

enum swim_state {
	idle,
	locating,
	seeking,
	settling,
	do_transfer,
	jogging,
	available,
	revalidating,
	ejecting
};
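
/*
 * Each REG(x) is one live register byte followed by 15 pad bytes, so
 * successive SWIM3 registers fall on 16-byte boundaries of the chip's
 * address space.
 */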
#define REG(x)	unsigned char x; char x ## _pad[15];

/*
 * The names for these registers mostly represent speculation on my part.
 * It will be interesting to see how close they are to the names Apple uses.
 */
struct swim3 {
	REG(data);
	REG(timer);		/* counts down at 1MHz */
	REG(error);
	REG(mode);
	REG(select);		/* controls CA0, CA1, CA2 and LSTRB signals */
	REG(setup);
	REG(control);		/* writing bits clears them */
	REG(status);		/* writing bits sets them in control */
	REG(intr);
	REG(nseek);		/* # tracks to seek */
	REG(ctrack);		/* current track number */
	REG(csect);		/* current sector number */
	REG(gap3);		/* size of gap 3 in track format */
	REG(sector);		/* sector # to read or write */
	REG(nsect);		/* # sectors to read or write */
	REG(intr_enable);
};

#define control_bic	control
#define control_bis	status

/* Bits in select register */
#define CA_MASK		7
#define LSTRB		8

/* Bits in control register */
#define DO_SEEK		0x80
#define FORMAT		0x40
#define SELECT		0x20
#define WRITE_SECTORS	0x10
#define DO_ACTION	0x08
#define DRIVE2_ENABLE	0x04
#define DRIVE_ENABLE	0x02
#define INTR_ENABLE	0x01

/* Bits in status register */
#define FIFO_1BYTE	0x80
#define FIFO_2BYTE	0x40
#define ERROR		0x20
#define DATA		0x08
#define RDDATA		0x04
#define INTR_PENDING	0x02
#define MARK_BYTE	0x01

/* Bits in intr and intr_enable registers */
#define ERROR_INTR	0x20
#define DATA_CHANGED	0x10
#define TRANSFER_DONE	0x08
#define SEEN_SECTOR	0x04
#define SEEK_DONE	0x02
#define TIMER_DONE	0x01

/* Bits in error register */
#define ERR_DATA_CRC	0x80
#define ERR_ADDR_CRC	0x40
#define ERR_OVERRUN	0x04
#define ERR_UNDERRUN	0x01

/* Bits in setup register */
#define S_SW_RESET	0x80
#define S_GCR_WRITE	0x40
#define S_IBM_DRIVE	0x20
#define S_TEST_MODE	0x10
#define S_FCLK_DIV2	0x08
#define S_GCR		0x04
#define S_COPY_PROT	0x02
#define S_INV_WDATA	0x01

/* Select values for swim3_action */
#define SEEK_POSITIVE	0
#define SEEK_NEGATIVE	4
#define STEP		1
#define MOTOR_ON	2
#define MOTOR_OFF	6
#define INDEX		3
#define EJECT		7
#define SETMFM		9
#define SETGCR		13

/* Select values for swim3_select and swim3_readbit */
#define STEP_DIR	0
#define STEPPING	1
#define MOTOR_ON	2
#define RELAX		3	/* also eject in progress */
#define READ_DATA_0	4
#define ONEMEG_DRIVE	5
#define SINGLE_SIDED	6	/* drive or diskette is 4MB type? */
#define DRIVE_PRESENT	7
#define DISK_IN		8
#define WRITE_PROT	9
#define TRACK_ZERO	10
#define TACHO		11
#define READ_DATA_1	12
#define GCR_MODE	13
#define SEEK_COMPLETE	14
#define TWOMEG_MEDIA	15

/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE	0x99
#define GCR_SYNC_EXC	0x3f
#define GCR_SYNC_CONV	0x80
#define GCR_FIRST_MARK	0xd5
#define GCR_SECOND_MARK	0xaa
#define GCR_ADDR_MARK	"\xd5\xaa\x00"
#define GCR_DATA_MARK	"\xd5\xaa\x0b"
#define GCR_SLIP_BYTE	"\x27\xaa"
#define GCR_SELF_SYNC	"\x3f\xbf\x1e\x34\x3c\x3f"
#define DATA_99		"\x99\x99"
#define MFM_ADDR_MARK	"\x99\xa1\x99\xa1\x99\xa1\x99\xfe"
#define MFM_INDEX_MARK	"\x99\xc2\x99\xc2\x99\xc2\x99\xfc"
#define MFM_GAP_LEN	12

struct floppy_state {
	enum swim_state	state;
	struct swim3 __iomem *swim3;	/* hardware registers */
	struct dbdma_regs __iomem *dma;	/* DMA controller registers */
	int	swim3_intr;	/* interrupt number for SWIM3 */
	int	dma_intr;	/* interrupt number for DMA channel */
	int	cur_cyl;	/* cylinder head is on, or -1 */
	int	cur_sector;	/* last sector we saw go past */
	int	req_cyl;	/* the cylinder for the current r/w request */
	int	head;		/* head number ditto */
	int	req_sector;	/* sector number ditto */
	int	scount;		/* # sectors we're transferring at present */
	int	retries;
	int	settle_time;
	int	secpercyl;	/* disk geometry information */
	int	secpertrack;
	int	total_secs;
	int	write_prot;	/* 1 if write-protected, 0 if not, -1 dunno */
	struct dbdma_cmd *dma_cmd;
	int	ref_count;
	int	expect_cyl;
	struct timer_list timeout;
	int	timeout_pending;
	int	ejected;
	wait_queue_head_t wait;
	int	wanted;
	struct macio_dev *mdev;
	char	dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
	int	index;
	struct request *cur_req;
	struct blk_mq_tag_set tag_set;
};

#define swim3_err(fmt, arg...)	dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_warn(fmt, arg...)	dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_info(fmt, arg...)	dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)

#ifdef DEBUG
#define swim3_dbg(fmt, arg...)	dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#else
#define swim3_dbg(fmt, arg...)	do { } while(0)
#endif

static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);

static unsigned short write_preamble[] = {
	0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e,	/* gap field */
	0, 0, 0, 0, 0, 0,			/* sync field */
	0x99a1, 0x99a1, 0x99a1, 0x99fb,		/* data address mark */
	0x990f					/* no escape for 512 bytes */
};

static unsigned short write_postamble[] = {
	0x9904,					/* insert CRC */
	0x4e4e, 0x4e4e,
	0x9908,					/* stop writing */
	0, 0, 0, 0, 0, 0
};
static void seek_track(struct floppy_state *fs, int n);
static void act(struct floppy_state *fs);
static void scan_timeout(struct timer_list *t);
static void seek_timeout(struct timer_list *t);
static void settle_timeout(struct timer_list *t);
static void xfer_timeout(struct timer_list *t);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
		      int interruptible);
static void release_drive(struct floppy_state *fs);
static int fd_eject(struct floppy_state *fs);
static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
			unsigned int cmd, unsigned long param);
static int floppy_open(struct gendisk *disk, blk_mode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
					unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
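
/*
 * Complete up to nr_bytes of the current request (or all of it on error).
 * Returns true if the request still has sectors outstanding, i.e. the
 * state machine should keep going with the next chunk.
 */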
static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
	struct request *req = fs->cur_req;

	swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
		  err, nr_bytes, req);

	if (err)
		nr_bytes = blk_rq_cur_bytes(req);
	if (blk_update_request(req, err, nr_bytes))
		return true;
	__blk_mq_end_request(req, err);
	fs->cur_req = NULL;
	return false;
}

static void swim3_select(struct floppy_state *fs, int sel)
{
	struct swim3 __iomem *sw = fs->swim3;

	out_8(&sw->select, RELAX);
	if (sel & 8)
		out_8(&sw->control_bis, SELECT);
	else
		out_8(&sw->control_bic, SELECT);
	out_8(&sw->select, sel & CA_MASK);
}
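
/* Pulse LSTRB so the drive latches the action selected on CA0-CA2. */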
static void swim3_action(struct floppy_state *fs, int action)
{
	struct swim3 __iomem *sw = fs->swim3;

	swim3_select(fs, action);
	udelay(1);
	out_8(&sw->select, sw->select | LSTRB);
	udelay(2);
	out_8(&sw->select, sw->select & ~LSTRB);
	udelay(1);
}

static int swim3_readbit(struct floppy_state *fs, int bit)
{
	struct swim3 __iomem *sw = fs->swim3;
	int stat;

	swim3_select(fs, bit);
	udelay(1);
	stat = in_8(&sw->status);
	return (stat & DATA) == 0;
}
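
/*
 * blk-mq dispatch.  The driver handles one request at a time: if the
 * drive is busy (a transfer in flight, or grabbed for an ioctl), the
 * new request is returned with BLK_STS_DEV_RESOURCE so that blk-mq
 * re-dispatches it later.
 */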
static blk_status_t swim3_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct floppy_state *fs = hctx->queue->queuedata;
	struct request *req = bd->rq;
	unsigned long x;

	spin_lock_irq(&swim3_lock);
	if (fs->cur_req || fs->state != idle) {
		spin_unlock_irq(&swim3_lock);
		return BLK_STS_DEV_RESOURCE;
	}
	blk_mq_start_request(req);
	fs->cur_req = req;
	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD) {
		swim3_dbg("%s", " media bay absent, dropping req\n");
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		goto out;
	}
	if (fs->ejected) {
		swim3_dbg("%s", " disk ejected\n");
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		goto out;
	}
	if (rq_data_dir(req) == WRITE) {
		if (fs->write_prot < 0)
			fs->write_prot = swim3_readbit(fs, WRITE_PROT);
		if (fs->write_prot) {
			swim3_dbg("%s", " try to write, disk write protected\n");
			swim3_end_request(fs, BLK_STS_IOERR, 0);
			goto out;
		}
	}

	/*
	 * Do not remove the cast. blk_rq_pos(req) is now a sector_t and can be
	 * 64 bits, but it will never go past 32 bits for this driver anyway, so
	 * we can safely cast it down and not have to do a 64/32 division
	 */
	fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
	x = ((long)blk_rq_pos(req)) % fs->secpercyl;
	fs->head = x / fs->secpertrack;
	fs->req_sector = x % fs->secpertrack + 1;

	fs->state = do_transfer;
	fs->retries = 0;

	act(fs);

out:
	spin_unlock_irq(&swim3_lock);
	return BLK_STS_OK;
}

static void set_timeout(struct floppy_state *fs, int nticks,
			void (*proc)(struct timer_list *t))
{
	if (fs->timeout_pending)
		del_timer(&fs->timeout);
	fs->timeout.expires = jiffies + nticks;
	fs->timeout.function = proc;
	add_timer(&fs->timeout);
	fs->timeout_pending = 1;
}

static inline void scan_track(struct floppy_state *fs)
{
	struct swim3 __iomem *sw = fs->swim3;

	swim3_select(fs, READ_DATA_0);
	in_8(&sw->intr);		/* clear SEEN_SECTOR bit */
	in_8(&sw->error);
	out_8(&sw->intr_enable, SEEN_SECTOR);
	out_8(&sw->control_bis, DO_ACTION);
	/* enable intr when track found */
	set_timeout(fs, HZ, scan_timeout);	/* enable timeout */
}

static inline void seek_track(struct floppy_state *fs, int n)
{
	struct swim3 __iomem *sw = fs->swim3;

	if (n >= 0) {
		swim3_action(fs, SEEK_POSITIVE);
		sw->nseek = n;
	} else {
		swim3_action(fs, SEEK_NEGATIVE);
		sw->nseek = -n;
	}
	fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1;
	swim3_select(fs, STEP);
	in_8(&sw->error);
	/* enable intr when seek finished */
	out_8(&sw->intr_enable, SEEK_DONE);
	out_8(&sw->control_bis, DO_SEEK);
	set_timeout(fs, 3*HZ, seek_timeout);	/* enable timeout */
	fs->settle_time = 0;
}

/*
 * XXX: this is a horrible hack, but at least allows ppc32 to get
 * out of defining virt_to_bus, and this driver out of using the
 * deprecated block layer bounce buffering for highmem addresses
 * for no good reason.
 */
static unsigned long swim3_phys_to_bus(phys_addr_t paddr)
{
	return paddr + PCI_DRAM_OFFSET;
}

static phys_addr_t swim3_bio_phys(struct bio *bio)
{
	return page_to_phys(bio_page(bio)) + bio_offset(bio);
}

static inline void init_dma(struct dbdma_cmd *cp, int cmd,
			    phys_addr_t paddr, int count)
{
	cp->req_count = cpu_to_le16(count);
	cp->command = cpu_to_le16(cmd);
	cp->phy_addr = cpu_to_le32(swim3_phys_to_bus(paddr));
	cp->xfer_status = 0;
}
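
/*
 * Build the dbdma program for one transfer.  A write uses three commands
 * (preamble, one 512-byte sector, postamble); a read uses a single
 * INPUT_LAST covering up to the rest of the track.  The chain is always
 * terminated with a DBDMA_STOP command.
 */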
static inline void setup_transfer(struct floppy_state *fs)
{
	int n;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_cmd *cp = fs->dma_cmd;
	struct dbdma_regs __iomem *dr = fs->dma;
	struct request *req = fs->cur_req;

	if (blk_rq_cur_sectors(req) <= 0) {
		swim3_warn("%s", "Transfer 0 sectors ?\n");
		return;
	}
	if (rq_data_dir(req) == WRITE)
		n = 1;
	else {
		n = fs->secpertrack - fs->req_sector + 1;
		if (n > blk_rq_cur_sectors(req))
			n = blk_rq_cur_sectors(req);
	}

	swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
		  fs->req_sector, fs->secpertrack, fs->head, n);

	fs->scount = n;
	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
	out_8(&sw->sector, fs->req_sector);
	out_8(&sw->nsect, n);
	out_8(&sw->gap3, 0);
	out_le32(&dr->cmdptr, swim3_phys_to_bus(virt_to_phys(cp)));
	if (rq_data_dir(req) == WRITE) {
		/* Set up 3 dma commands: write preamble, data, postamble */
		init_dma(cp, OUTPUT_MORE, virt_to_phys(write_preamble),
			 sizeof(write_preamble));
		++cp;
		init_dma(cp, OUTPUT_MORE, swim3_bio_phys(req->bio), 512);
		++cp;
		init_dma(cp, OUTPUT_LAST, virt_to_phys(write_postamble),
			 sizeof(write_postamble));
	} else {
		init_dma(cp, INPUT_LAST, swim3_bio_phys(req->bio), n * 512);
	}
	++cp;
	out_le16(&cp->command, DBDMA_STOP);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	in_8(&sw->error);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	if (rq_data_dir(req) == WRITE)
		out_8(&sw->control_bis, WRITE_SECTORS);
	in_8(&sw->intr);
	out_le32(&dr->control, (RUN << 16) | RUN);
	/* enable intr when transfer complete */
	out_8(&sw->intr_enable, TRANSFER_DONE);
	out_8(&sw->control_bis, DO_ACTION);
	set_timeout(fs, 2*HZ, xfer_timeout);	/* enable timeout */
}
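
/*
 * Advance the state machine: loop switching on fs->state until the next
 * step has to wait for an interrupt or a timeout.
 */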
static void act(struct floppy_state *fs)
{
	for (;;) {
		swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
			  fs->state, fs->req_cyl, fs->cur_cyl);

		switch (fs->state) {
		case idle:
			return;		/* XXX shouldn't get here */

		case locating:
			if (swim3_readbit(fs, TRACK_ZERO)) {
				swim3_dbg("%s", " locate track 0\n");
				fs->cur_cyl = 0;
				if (fs->req_cyl == 0)
					fs->state = do_transfer;
				else
					fs->state = seeking;
				break;
			}
			scan_track(fs);
			return;

		case seeking:
			if (fs->cur_cyl < 0) {
				fs->expect_cyl = -1;
				fs->state = locating;
				break;
			}
			if (fs->req_cyl == fs->cur_cyl) {
				swim3_warn("%s", "Whoops, seeking 0\n");
				fs->state = do_transfer;
				break;
			}
			seek_track(fs, fs->req_cyl - fs->cur_cyl);
			return;

		case settling:
			/* check for SEEK_COMPLETE after 30ms */
			fs->settle_time = (HZ + 32) / 33;
			set_timeout(fs, fs->settle_time, settle_timeout);
			return;

		case do_transfer:
			if (fs->cur_cyl != fs->req_cyl) {
				if (fs->retries > 5) {
					swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
						  fs->req_cyl, fs->cur_cyl);
					swim3_end_request(fs, BLK_STS_IOERR, 0);
					fs->state = idle;
					return;
				}
				fs->state = seeking;
				break;
			}
			setup_transfer(fs);
			return;

		case jogging:
			seek_track(fs, -5);
			return;

		default:
			swim3_err("Unknown state %d\n", fs->state);
			return;
		}
	}
}

static void scan_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* scan timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	out_8(&sw->select, RELAX);
	out_8(&sw->intr_enable, 0);
	fs->cur_cyl = -1;
	if (fs->retries > 5) {
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		fs->state = idle;
	} else {
		fs->state = jogging;
		act(fs);
	}
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void seek_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* seek timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_8(&sw->control_bic, DO_SEEK);
	out_8(&sw->select, RELAX);
	out_8(&sw->intr_enable, 0);
	swim3_err("%s", "Seek timeout\n");
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void settle_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* settle timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	if (swim3_readbit(fs, SEEK_COMPLETE)) {
		out_8(&sw->select, RELAX);
		fs->state = locating;
		act(fs);
		goto unlock;
	}
	out_8(&sw->select, RELAX);
	if (fs->settle_time < 2*HZ) {
		++fs->settle_time;
		set_timeout(fs, 1, settle_timeout);
		goto unlock;
	}
	swim3_err("%s", "Seek settle timeout\n");
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
 unlock:
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void xfer_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_regs __iomem *dr = fs->dma;
	unsigned long flags;
	int n;

	swim3_dbg("* xfer timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_le32(&dr->control, RUN << 16);
	/* We must wait a bit for dbdma to stop */
	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
		udelay(1);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
	out_8(&sw->select, RELAX);
	swim3_err("Timeout %sing sector %ld\n",
		  (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
		  (long)blk_rq_pos(fs->cur_req));
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);
}
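
/*
 * Interrupt handler: drives the state machine forward on controller
 * interrupts (SEEN_SECTOR, SEEK_DONE, TRANSFER_DONE and errors).
 */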
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;
	unsigned long flags;
	struct request *req = fs->cur_req;

	swim3_dbg("* interrupt, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	intr = in_8(&sw->intr);
	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
	if ((intr & ERROR_INTR) && fs->state != do_transfer)
		swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
			  fs->state, rq_data_dir(req), intr, err);
	switch (fs->state) {
	case locating:
		if (intr & SEEN_SECTOR) {
			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (sw->ctrack == 0xff) {
				swim3_err("%s", "Seen sector but cyl=ff?\n");
				fs->cur_cyl = -1;
				if (fs->retries > 5) {
					swim3_end_request(fs, BLK_STS_IOERR, 0);
					fs->state = idle;
				} else {
					fs->state = jogging;
					act(fs);
				}
				break;
			}
			fs->cur_cyl = sw->ctrack;
			fs->cur_sector = sw->csect;
			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
				swim3_err("Expected cyl %d, got %d\n",
					  fs->expect_cyl, fs->cur_cyl);
			fs->state = do_transfer;
			act(fs);
		}
		break;
	case seeking:
	case jogging:
		if (sw->nseek == 0) {
			out_8(&sw->control_bic, DO_SEEK);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (fs->state == seeking)
				++fs->retries;
			fs->state = settling;
			act(fs);
		}
		break;
	case settling:
		out_8(&sw->intr_enable, 0);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		act(fs);
		break;
	case do_transfer:
		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
			break;
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
		out_8(&sw->select, RELAX);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		dr = fs->dma;
		cp = fs->dma_cmd;
		if (rq_data_dir(req) == WRITE)
			++cp;
		/*
		 * Check that the main data transfer has finished.
		 * On writing, the swim3 sometimes doesn't use
		 * up all the bytes of the postamble, so we can still
		 * see DMA active here. That doesn't matter as long
		 * as all the sector data has been transferred.
		 */
		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
			/* wait a little while for DMA to complete */
			for (n = 0; n < 100; ++n) {
				if (cp->xfer_status != 0)
					break;
				udelay(1);
				barrier();
			}
		}
		/* turn off DMA */
		out_le32(&dr->control, (RUN | PAUSE) << 16);
		stat = le16_to_cpu(cp->xfer_status);
		resid = le16_to_cpu(cp->res_count);
		if (intr & ERROR_INTR) {
			n = fs->scount - 1 - resid / 512;
			if (n > 0) {
				blk_update_request(req, 0, n << 9);
				fs->req_sector += n;
			}
			if (fs->retries < 5) {
				++fs->retries;
				act(fs);
			} else {
				swim3_err("Error %sing block %ld (err=%x)\n",
					  rq_data_dir(req) == WRITE? "writ": "read",
					  (long)blk_rq_pos(req), err);
				swim3_end_request(fs, BLK_STS_IOERR, 0);
				fs->state = idle;
			}
		} else {
			if ((stat & ACTIVE) == 0 || resid != 0) {
				/* musta been an error */
				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
				swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
					  fs->state, rq_data_dir(req), intr, err);
				swim3_end_request(fs, BLK_STS_IOERR, 0);
				fs->state = idle;
				break;
			}
			fs->retries = 0;
			if (swim3_end_request(fs, 0, fs->scount << 9)) {
				fs->req_sector += fs->scount;
				if (fs->req_sector > fs->secpertrack) {
					fs->req_sector -= fs->secpertrack;
					if (++fs->head > 1) {
						fs->head = 0;
						++fs->req_cyl;
					}
				}
				act(fs);
			} else
				fs->state = idle;
		}
		break;
	default:
		swim3_err("Don't know what to do in state %d\n", fs->state);
	}
	spin_unlock_irqrestore(&swim3_lock, flags);
	return IRQ_HANDLED;
}

/*
static void fd_dma_interrupt(int irq, void *dev_id)
{
}
*/

/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
		      int interruptible)
{
	unsigned long flags;

	swim3_dbg("%s", "-> grab drive\n");

	spin_lock_irqsave(&swim3_lock, flags);
	if (fs->state != idle && fs->state != available) {
		++fs->wanted;
		/* this will enable irqs in order to sleep */
		if (!interruptible)
			wait_event_lock_irq(fs->wait,
					    fs->state == available,
					    swim3_lock);
		else if (wait_event_interruptible_lock_irq(fs->wait,
					fs->state == available,
					swim3_lock)) {
			--fs->wanted;
			spin_unlock_irqrestore(&swim3_lock, flags);
			return -EINTR;
		}
		--fs->wanted;
	}
	fs->state = state;
	spin_unlock_irqrestore(&swim3_lock, flags);

	return 0;
}
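
/*
 * Drop exclusive access to the drive.  The freeze/unfreeze sequence is
 * presumably there to kick the request queue so that anything deferred
 * with BLK_STS_DEV_RESOURCE while the drive was grabbed gets dispatched
 * again.
 */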
static void release_drive(struct floppy_state *fs)
{
	struct request_queue *q = disks[fs->index]->queue;
	unsigned long flags;

	swim3_dbg("%s", "-> release drive\n");

	spin_lock_irqsave(&swim3_lock, flags);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

static int fd_eject(struct floppy_state *fs)
{
	int err, n;

	err = grab_drive(fs, ejecting, 1);
	if (err)
		return err;
	swim3_action(fs, EJECT);
	for (n = 20; n > 0; --n) {
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
		swim3_select(fs, RELAX);
		schedule_timeout_interruptible(1);
		if (swim3_readbit(fs, DISK_IN) == 0)
			break;
	}
	swim3_select(fs, RELAX);
	udelay(150);
	fs->ejected = 1;
	release_drive(fs);
	return err;
}

static struct floppy_struct floppy_type =
	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };	/* 7 1.44MB 3.5" */

static int floppy_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
			       unsigned int cmd, unsigned long param)
{
	struct floppy_state *fs = bdev->bd_disk->private_data;
	int err;

	if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD)
		return -ENXIO;

	switch (cmd) {
	case FDEJECT:
		if (fs->ref_count != 1)
			return -EBUSY;
		err = fd_eject(fs);
		return err;
	case FDGETPRM:
		if (copy_to_user((void __user *) param, &floppy_type,
				 sizeof(struct floppy_struct)))
			return -EFAULT;
		return 0;
	}
	return -ENOTTY;
}

static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
			unsigned int cmd, unsigned long param)
{
	int ret;

	mutex_lock(&swim3_mutex);
	ret = floppy_locked_ioctl(bdev, mode, cmd, param);
	mutex_unlock(&swim3_mutex);

	return ret;
}

static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
	struct floppy_state *fs = disk->private_data;
	struct swim3 __iomem *sw = fs->swim3;
	int n, err = 0;

	if (fs->ref_count == 0) {
		if (fs->mdev->media_bay &&
		    check_media_bay(fs->mdev->media_bay) != MB_FD)
			return -ENXIO;
		out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
		out_8(&sw->control_bic, 0xff);
		out_8(&sw->mode, 0x95);
		udelay(10);
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
		swim3_action(fs, MOTOR_ON);
		fs->write_prot = -1;
		fs->cur_cyl = -1;
		for (n = 0; n < 2 * HZ; ++n) {
			if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
				break;
			if (signal_pending(current)) {
				err = -EINTR;
				break;
			}
			swim3_select(fs, RELAX);
			schedule_timeout_interruptible(1);
		}
		if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
				 || swim3_readbit(fs, DISK_IN) == 0))
			err = -ENXIO;
		swim3_action(fs, SETMFM);
		swim3_select(fs, RELAX);

	} else if (fs->ref_count == -1 || mode & BLK_OPEN_EXCL)
		return -EBUSY;

	if (err == 0 && !(mode & BLK_OPEN_NDELAY) &&
	    (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE))) {
		if (disk_check_media_change(disk))
			floppy_revalidate(disk);
		if (fs->ejected)
			err = -ENXIO;
	}

	if (err == 0 && (mode & BLK_OPEN_WRITE)) {
		if (fs->write_prot < 0)
			fs->write_prot = swim3_readbit(fs, WRITE_PROT);
		if (fs->write_prot)
			err = -EROFS;
	}

	if (err) {
		if (fs->ref_count == 0) {
			swim3_action(fs, MOTOR_OFF);
			out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
			swim3_select(fs, RELAX);
		}
		return err;
	}

	if (mode & BLK_OPEN_EXCL)
		fs->ref_count = -1;
	else
		++fs->ref_count;

	return 0;
}

static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode)
{
	int ret;

	mutex_lock(&swim3_mutex);
	ret = floppy_open(disk, mode);
	mutex_unlock(&swim3_mutex);

	return ret;
}

static void floppy_release(struct gendisk *disk)
{
	struct floppy_state *fs = disk->private_data;
	struct swim3 __iomem *sw = fs->swim3;

	mutex_lock(&swim3_mutex);
	if (fs->ref_count > 0)
		--fs->ref_count;
	else if (fs->ref_count == -1)
		fs->ref_count = 0;
	if (fs->ref_count == 0) {
		swim3_action(fs, MOTOR_OFF);
		out_8(&sw->control_bic, 0xff);
		swim3_select(fs, RELAX);
	}
	mutex_unlock(&swim3_mutex);
}

static unsigned int floppy_check_events(struct gendisk *disk,
					unsigned int clearing)
{
	struct floppy_state *fs = disk->private_data;
	return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static int floppy_revalidate(struct gendisk *disk)
{
	struct floppy_state *fs = disk->private_data;
	struct swim3 __iomem *sw;
	int ret, n;

	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD)
		return -ENXIO;

	sw = fs->swim3;
	grab_drive(fs, revalidating, 0);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bis, DRIVE_ENABLE);
	swim3_action(fs, MOTOR_ON);	/* necessary? */
	fs->write_prot = -1;
	fs->cur_cyl = -1;
	mdelay(1);
	for (n = HZ; n > 0; --n) {
		if (swim3_readbit(fs, SEEK_COMPLETE))
			break;
		if (signal_pending(current))
			break;
		swim3_select(fs, RELAX);
		schedule_timeout_interruptible(1);
	}
	ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
		|| swim3_readbit(fs, DISK_IN) == 0;
	if (ret)
		swim3_action(fs, MOTOR_OFF);
	else {
		fs->ejected = 0;
		swim3_action(fs, SETMFM);
	}
	swim3_select(fs, RELAX);

	release_drive(fs);
	return ret;
}

static const struct block_device_operations floppy_fops = {
	.open		= floppy_unlocked_open,
	.release	= floppy_release,
	.ioctl		= floppy_ioctl,
	.check_events	= floppy_check_events,
};

static const struct blk_mq_ops swim3_mq_ops = {
	.queue_rq	= swim3_queue_rq,
};

static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
	struct floppy_state *fs = macio_get_drvdata(mdev);
	struct swim3 __iomem *sw;

	if (!fs)
		return;

	sw = fs->swim3;

	if (mb_state != MB_FD)
		return;

	/* Clear state */
	out_8(&sw->intr_enable, 0);
	in_8(&sw->intr);
	in_8(&sw->error);
}

static int swim3_add_device(struct macio_dev *mdev, int index)
{
	struct device_node *swim = mdev->ofdev.dev.of_node;
	struct floppy_state *fs = &floppy_states[index];
	int rc = -EBUSY;

	fs->mdev = mdev;
	fs->index = index;

	/* Check & Request resources */
	if (macio_resource_count(mdev) < 2) {
		swim3_err("%s", "No address in device-tree\n");
		return -ENXIO;
	}
	if (macio_irq_count(mdev) < 1) {
		swim3_err("%s", "No interrupt in device-tree\n");
		return -ENXIO;
	}
	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
		swim3_err("%s", "Can't request mmio resource\n");
		return -EBUSY;
	}
	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
		swim3_err("%s", "Can't request dma resource\n");
		macio_release_resource(mdev, 0);
		return -EBUSY;
	}
	dev_set_drvdata(&mdev->ofdev.dev, fs);

	if (mdev->media_bay == NULL)
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);

	fs->state = idle;
	fs->swim3 = (struct swim3 __iomem *)
		ioremap(macio_resource_start(mdev, 0), 0x200);
	if (fs->swim3 == NULL) {
		swim3_err("%s", "Couldn't map mmio registers\n");
		rc = -ENOMEM;
		goto out_release;
	}
	fs->dma = (struct dbdma_regs __iomem *)
		ioremap(macio_resource_start(mdev, 1), 0x200);
	if (fs->dma == NULL) {
		swim3_err("%s", "Couldn't map dma registers\n");
		iounmap(fs->swim3);
		rc = -ENOMEM;
		goto out_release;
	}
	fs->swim3_intr = macio_irq(mdev, 0);
	fs->dma_intr = macio_irq(mdev, 1);
	fs->cur_cyl = -1;
	fs->cur_sector = -1;
	fs->secpercyl = 36;
	fs->secpertrack = 18;
	fs->total_secs = 2880;
	init_waitqueue_head(&fs->wait);

	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
	fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP);

	if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
		swim3_mb_event(mdev, MB_FD);

	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
		swim3_err("%s", "Couldn't request interrupt\n");
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
		goto out_unmap;
	}

	timer_setup(&fs->timeout, NULL, 0);

	swim3_info("SWIM3 floppy controller %s\n",
		   mdev->media_bay ? "in media bay" : "");

	return 0;

out_unmap:
	iounmap(fs->dma);
	iounmap(fs->swim3);

out_release:
	macio_release_resource(mdev, 0);
	macio_release_resource(mdev, 1);

	return rc;
}

static int swim3_attach(struct macio_dev *mdev,
			const struct of_device_id *match)
{
	struct queue_limits lim = {
		.features	= BLK_FEAT_ROTATIONAL,
	};
	struct floppy_state *fs;
	struct gendisk *disk;
	int rc;

	if (floppy_count >= MAX_FLOPPIES)
		return -ENXIO;

	if (floppy_count == 0) {
		rc = register_blkdev(FLOPPY_MAJOR, "fd");
		if (rc)
			return rc;
	}

	fs = &floppy_states[floppy_count];
	memset(fs, 0, sizeof(*fs));

	rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
				     BLK_MQ_F_SHOULD_MERGE);
	if (rc)
		goto out_unregister;

	disk = blk_mq_alloc_disk(&fs->tag_set, &lim, fs);
	if (IS_ERR(disk)) {
		rc = PTR_ERR(disk);
		goto out_free_tag_set;
	}

	rc = swim3_add_device(mdev, floppy_count);
	if (rc)
		goto out_cleanup_disk;

	disk->major = FLOPPY_MAJOR;
	disk->first_minor = floppy_count;
	disk->minors = 1;
	disk->fops = &floppy_fops;
	disk->private_data = fs;
	disk->events = DISK_EVENT_MEDIA_CHANGE;
	disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
	sprintf(disk->disk_name, "fd%d", floppy_count);
	set_capacity(disk, 2880);

	rc = add_disk(disk);
	if (rc)
		goto out_cleanup_disk;

	disks[floppy_count++] = disk;
	return 0;

out_cleanup_disk:
	put_disk(disk);
out_free_tag_set:
	blk_mq_free_tag_set(&fs->tag_set);
out_unregister:
	if (floppy_count == 0)
		unregister_blkdev(FLOPPY_MAJOR, "fd");
	return rc;
}

static const struct of_device_id swim3_match[] =
{
	{
		.name		= "swim3",
	},
	{
		.compatible	= "ohare-swim3"
	},
	{
		.compatible	= "swim3"
	},
	{ /* end of list */ }
};

static struct macio_driver swim3_driver =
{
	.driver = {
		.name		= "swim3",
		.of_match_table	= swim3_match,
	},
	.probe		= swim3_attach,
#ifdef CONFIG_PMAC_MEDIABAY
	.mediabay_event	= swim3_mb_event,
#endif
#if 0
	.suspend	= swim3_suspend,
	.resume		= swim3_resume,
#endif
};

static int swim3_init(void)
{
	macio_register_driver(&swim3_driver);
	return 0;
}

module_init(swim3_init)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Mackerras");
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);