vmur.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Linux driver for System z and s390 unit record devices
  4. * (z/VM virtual punch, reader, printer)
  5. *
  6. * Copyright IBM Corp. 2001, 2009
  7. * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
  8. * Michael Holzheu <holzheu@de.ibm.com>
  9. * Frank Munzert <munzert@de.ibm.com>
  10. */
  11. #define KMSG_COMPONENT "vmur"
  12. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13. #include <linux/cdev.h>
  14. #include <linux/slab.h>
  15. #include <linux/module.h>
  16. #include <linux/uaccess.h>
  17. #include <asm/cio.h>
  18. #include <asm/ccwdev.h>
  19. #include <asm/debug.h>
  20. #include <asm/diag.h>
  21. #include "vmur.h"
  22. /*
  23. * Driver overview
  24. *
  25. * Unit record device support is implemented as a character device driver.
  26. * We can fit at least 16 bits into a device minor number and use the
  27. * simple method of mapping a character device number with minor abcd
  28. * to the unit record device with devno abcd.
  29. * I/O to virtual unit record devices is handled as follows:
  30. * Reads: Diagnose code 0x14 (input spool file manipulation)
  31. * is used to read spool data page-wise.
  32. * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
  33. * is available by reading sysfs attr reclen. Each write() to the device
  34. * must specify an integral multiple (maximal 511) of reclen.
  35. */
/* Banner string reused in log messages throughout this driver */
static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

/* First char dev number (major + minor 0), allocated in ur_init() */
static dev_t ur_first_dev_maj_min;
/* Device class used by ur_set_online() to create the /dev nodes */
static struct class *vmur_class;
/* s390 debug feature handle backing the TRACE() macro */
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },	/* 80-byte records */
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },		/* 132-byte records */
	{ /* end of list */ }
};
MODULE_DEVICE_TABLE(ccw, ur_ids);
/* ccw_driver callbacks, implemented below */
static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
static int ur_pm_suspend(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.driver = {
		.name = "vmur",
		.owner = THIS_MODULE,
	},
	.ids = ur_ids,
	.probe = ur_probe,
	.remove = ur_remove,
	.set_online = ur_set_online,
	.set_offline = ur_set_offline,
	.freeze = ur_pm_suspend,	/* refuse suspend while a node is open */
	.int_class = IRQIO_VMR,
};

/* Serializes ur_probe/ur_remove/ur_set_online/ur_set_offline */
static DEFINE_MUTEX(vmur_mutex);
/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc get a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */

/*
 * Allocate and initialize a urdev for @cdev with an initial refcount of 1.
 * Takes a reference on the ccw device (dropped in urdev_free()).
 * Returns NULL on allocation failure.
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	/* record length for write CCWs comes from the ur_ids table */
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	refcount_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}
/* Drop the cdev reference taken in urdev_alloc() and free the urdev. */
static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}
/* Take an additional reference on @urd; release with urdev_put(). */
static void urdev_get(struct urdev *urd)
{
	refcount_inc(&urd->ref_count);
}
/*
 * Look up the urdev attached to @cdev and return it with a reference
 * held, or NULL if none is attached.  The ccwdev lock protects against
 * a concurrent ur_remove() clearing the drvdata pointer.
 */
static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}
  126. static struct urdev *urdev_get_from_devno(u16 devno)
  127. {
  128. char bus_id[16];
  129. struct ccw_device *cdev;
  130. struct urdev *urd;
  131. sprintf(bus_id, "0.0.%04x", devno);
  132. cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
  133. if (!cdev)
  134. return NULL;
  135. urd = urdev_get_from_cdev(cdev);
  136. put_device(&cdev->dev);
  137. return urd;
  138. }
/* Drop a urdev reference; frees the urdev when the last one goes away. */
static void urdev_put(struct urdev *urd)
{
	if (refcount_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}
/*
 * State and contents of ur devices can be changed by class D users issuing
 * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
 * Also the Linux guest might be logged off, which causes all active spool
 * files to be closed.
 * So we cannot guarantee that spool files are still the same when the Linux
 * guest is resumed. In order to avoid unpredictable results at resume time
 * we simply refuse to suspend if a ur device node is open.
 */
static int ur_pm_suspend(struct ccw_device *cdev)
{
	struct urdev *urd = dev_get_drvdata(&cdev->dev);

	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
	/* open_flag is non-zero while a device node is open (see ur_open) */
	if (urd->open_flag) {
		pr_err("Unit record device %s is busy, %s refusing to "
		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
		return -EBUSY;
	}
	return 0;
}
/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for kfree()ing the channel program
 * address pointer that alloc_chan_prog returned.
 */

/*
 * Free a channel program built by alloc_chan_prog(): each CCW's data
 * buffer, then the CCW array itself.  The array is zero-filled at
 * allocation, so the first CCW with a zero cda terminates the walk
 * (the trailing NOP CCW has no data buffer).
 */
static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}
/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 *
 * Returns the CCW array, which the caller must release with
 * free_chan_prog(), or an ERR_PTR on allocation/copy failure.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		/* GFP_DMA: CCW data addresses are 31-bit (u32 cda below) */
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			/* cda of entry i is still 0, so the partial program
			 * is freed cleanly by free_chan_prog() */
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}
/*
 * Synchronously execute the channel program @cpa on @urd.  Serialized on
 * urd->io_mutex; completion is signalled from ur_int_handler().
 * Returns 0 once the I/O interrupt arrived (the device status is then in
 * urd->io_request_rc), or a negative errno if the I/O could not be
 * started or the mutex wait was interrupted.
 */
static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	/* published for ur_int_handler(); valid while io_mutex is held */
	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}
/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	if (!IS_ERR(irb)) {
		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
		      irb->scsw.cmd.count);
	}
	/* do_ur_io() starts I/O with intparm == 1; zero means unsolicited */
	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	BUG_ON(!urd);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;	/* clean CE+DE: success */
	else
		urd->io_request_rc = -EIO;

	/* wake the waiter blocked in do_ur_io() */
	complete(urd->io_done);
}
/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;	/* device is going away */
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}
static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
/* Create the sysfs "reclen" attribute for @dev. */
static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

/* Remove the sysfs "reclen" attribute from @dev. */
static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}
/*
 * diagnose code 0x210 - retrieve device information
 * cc=0 normal completion, we have a real device
 * cc=1 CP paging error
 * cc=2 The virtual device exists, but is not associated with a real device
 * cc=3 Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	/*
	 * static buffer: only called from ur_probe(), which is serialized
	 * by vmur_mutex, so no concurrent use
	 */
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -EOPNOTSUPP;	/* real device: not handled by vmur */
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}
/*
 * Allocation and freeing of urfile structures
 */

/*
 * Allocate a per-open urfile bound to @urd.  The caller fills in the
 * record lengths (see ur_open()).  Returns NULL on allocation failure.
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);
	return urf;
}
/* Free a urfile allocated by urfile_alloc(). */
static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}
/*
 * The fops implementation of the character device driver
 */

/*
 * Build a channel program for count/reclen records from @udata and
 * execute it on @urd.  Returns the number of bytes written (count) or a
 * negative errno.
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	/* I/O completed; check the device status set by ur_int_handler() */
	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}
  381. static ssize_t ur_write(struct file *file, const char __user *udata,
  382. size_t count, loff_t *ppos)
  383. {
  384. struct urfile *urf = file->private_data;
  385. TRACE("ur_write: count=%zu\n", count);
  386. if (count == 0)
  387. return 0;
  388. if (count % urf->dev_reclen)
  389. return -EINVAL; /* count must be a multiple of reclen */
  390. if (count > urf->dev_reclen * MAX_RECS_PER_IO)
  391. count = urf->dev_reclen * MAX_RECS_PER_IO;
  392. return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
  393. }
  394. /*
  395. * diagnose code 0x14 subcode 0x0028 - position spool file to designated
  396. * record
  397. * cc=0 normal completion
  398. * cc=2 no file active on the virtual reader or device not ready
  399. * cc=3 record specified is beyond EOF
  400. */
  401. static int diag_position_to_record(int devno, int record)
  402. {
  403. int cc;
  404. cc = diag14(record, devno, 0x28);
  405. switch (cc) {
  406. case 0:
  407. return 0;
  408. case 2:
  409. return -ENOMEDIUM;
  410. case 3:
  411. return -ENODATA; /* position beyond end of file */
  412. default:
  413. return -EIO;
  414. }
  415. }
  416. /*
  417. * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
  418. * cc=0 normal completion
  419. * cc=1 EOF reached
  420. * cc=2 no file active on the virtual reader, and no file eligible
  421. * cc=3 file already active on the virtual reader or specified virtual
  422. * reader does not exist or is not a reader
  423. */
  424. static int diag_read_file(int devno, char *buf)
  425. {
  426. int cc;
  427. cc = diag14((unsigned long) buf, devno, 0x00);
  428. switch (cc) {
  429. case 0:
  430. return 0;
  431. case 1:
  432. return -ENODATA;
  433. case 2:
  434. return -ENOMEDIUM;
  435. default:
  436. return -EIO;
  437. }
  438. }
/*
 * Read spool file data page-wise via diagnose 0x14.  Called with
 * urd->io_mutex held (see ur_read()).  *offs selects the spool file page
 * to position to; the copy to user space starts at *offs % PAGE_SIZE
 * within the first page read.
 */
static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	/* spool records are 1-based, one record per page */
	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;	/* positioned past EOF: nothing to read */
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	/* GFP_DMA: the diagnose writes into this bounce page */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);	/* offset within first page */
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA) {
			break;	/* EOF: return what was copied so far */
		}
		if (rc)
			goto fail;
		/* patch the record length into the file's first page */
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;	/* only the first page may start mid-page */
		copied += len;
	} while (copied != count);
	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}
/*
 * read() entry point: serialize on the device's io_mutex and delegate
 * to diag14_read().
 */
static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}
  499. /*
  500. * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
  501. * cc=0 normal completion
  502. * cc=1 no files on reader queue or no subsequent file
  503. * cc=2 spid specified is invalid
  504. */
  505. static int diag_read_next_file_info(struct file_control_block *buf, int spid)
  506. {
  507. int cc;
  508. cc = diag14((unsigned long) buf, spid, 0xfff);
  509. switch (cc) {
  510. case 0:
  511. return 0;
  512. default:
  513. return -ENODATA;
  514. }
  515. }
/*
 * Check that the file on top of the reader queue is actually readable:
 * the queue is non-empty, the file is not held, and opening it on the
 * virtual reader succeeds.  Returns 0 if OK, negative errno otherwise.
 */
static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}
  557. static int verify_device(struct urdev *urd)
  558. {
  559. switch (urd->class) {
  560. case DEV_CLASS_UR_O:
  561. return 0; /* no check needed here */
  562. case DEV_CLASS_UR_I:
  563. return verify_uri_device(urd);
  564. default:
  565. return -EOPNOTSUPP;
  566. }
  567. }
/*
 * Query the record length of the file on top of the reader queue.
 * CP dump files are reported with record length 0.  Returns the record
 * length (>= 0) or a negative errno.
 */
static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}
  586. static int get_file_reclen(struct urdev *urd)
  587. {
  588. switch (urd->class) {
  589. case DEV_CLASS_UR_O:
  590. return 0;
  591. case DEV_CLASS_UR_I:
  592. return get_uri_file_reclen(urd);
  593. default:
  594. return -EOPNOTSUPP;
  595. }
  596. }
/*
 * open() entry point.  Only one opener per device is allowed at a time
 * (urd->open_flag); further openers block (or get -EBUSY with
 * O_NONBLOCK).  Access mode must match the device class: read-only on
 * input devices, write-only on output devices.
 */
static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;	/* ur devices are unidirectional */
	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file_inode(file)->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

	/* wait until no one else has the device open */
	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	/* access mode must match the device class (input vs. output) */
	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	return rc;
}
/*
 * release() entry point: clear the single-open flag, wake up a possible
 * opener blocked in ur_open(), and drop the references taken at open.
 */
static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	spin_lock(&urf->urd->open_lock);
	urf->urd->open_flag--;
	spin_unlock(&urf->urd->open_lock);
	wake_up_interruptible(&urf->urd->wait);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}
  677. static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
  678. {
  679. if ((file->f_flags & O_ACCMODE) != O_RDONLY)
  680. return -ESPIPE; /* seek allowed only for reader */
  681. if (offset % PAGE_SIZE)
  682. return -ESPIPE; /* only multiples of 4K allowed */
  683. return no_seek_end_llseek(file, offset, whence);
  684. }
/* File operations for the vmrdr-*/vmpun-*/vmprt-* character devices */
static const struct file_operations ur_fops = {
	.owner = THIS_MODULE,
	.open = ur_open,
	.release = ur_release,
	.read = ur_read,
	.write = ur_write,
	.llseek = ur_llseek,
};
/*
 * ccw_device infrastructure:
 * ur_probe creates the struct urdev (with refcount = 1), the device
 * attributes, sets up the interrupt handler and validates the virtual
 * unit record device.
 * ur_remove removes the device attributes and drops the reference to
 * struct urdev.
 *
 * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 * by the vmur_mutex lock.
 *
 * urd->char_device is used as indication that the online function has
 * been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	/* publish the urdev; cleared again in ur_remove() */
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
/*
 * Bring the device online: register the character device under minor ==
 * devno and create the /dev node (vmrdr-*, vmpun-* or vmprt-* depending
 * on device type and class).  Serialized by vmur_mutex; a non-NULL
 * urd->char_device marks successful completion.
 */
static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	/* the devno doubles as the char dev minor number */
	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	urd->char_device->ops = &ur_fops;
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
	if (rc)
		goto fail_free_cdev;

	/* pick the node name from device type and class */
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, &cdev->dev,
				    urd->char_device->dev, NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
/*
 * Take the device offline: destroy the /dev node and character device.
 * Unless @force is set, refuses while a user still holds a urd
 * reference (e.g. from ur_open).  Caller must hold vmur_mutex.
 */
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	/* > 2: ur_probe's reference plus our own, so someone else holds one */
	if (!force && (refcount_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;	/* marks the device as offline */
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}
/* ccw set_offline callback: non-forced offline under vmur_mutex. */
static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}
/*
 * ccw remove callback: force the device offline if needed, remove the
 * sysfs attributes and drop ur_probe's urd reference.  Clearing the
 * drvdata pointer under the ccwdev lock stops urdev_get_from_cdev()
 * from handing out new references.
 */
static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}
/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	/* the driver relies on z/VM CP diagnoses; refuse to load elsewhere */
	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	/* set up the debug feature behind the TRACE() macro */
	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;

	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	/* remember the major; minors map 1:1 to device numbers */
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}
/* Module exit: undo ur_init() in reverse order. */
static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);