dasd_eer.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Character device driver for extended error reporting.
  4. *
  5. * Copyright IBM Corp. 2005
  6. * extended error reporting for DASD ECKD devices
  7. * Author(s): Stefan Weinhuber <wein@de.ibm.com>
  8. */
  9. #include <linux/init.h>
  10. #include <linux/fs.h>
  11. #include <linux/kernel.h>
  12. #include <linux/miscdevice.h>
  13. #include <linux/module.h>
  14. #include <linux/moduleparam.h>
  15. #include <linux/device.h>
  16. #include <linux/poll.h>
  17. #include <linux/mutex.h>
  18. #include <linux/err.h>
  19. #include <linux/slab.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/atomic.h>
  22. #include <asm/ebcdic.h>
  23. #include "dasd_int.h"
  24. #include "dasd_eckd.h"
  25. /*
  26. * SECTION: the internal buffer
  27. */
  28. /*
  29. * The internal buffer is meant to store obaque blobs of data, so it does
  30. * not know of higher level concepts like triggers.
  31. * It consists of a number of pages that are used as a ringbuffer. Each data
  32. * blob is stored in a simple record that consists of an integer, which
  33. * contains the size of the following data, and the data bytes themselfes.
  34. *
  35. * To allow for multiple independent readers we create one internal buffer
  36. * each time the device is opened and destroy the buffer when the file is
  37. * closed again. The number of pages used for this buffer is determined by
  38. * the module parmeter eer_pages.
  39. *
  40. * One record can be written to a buffer by using the functions
  41. * - dasd_eer_start_record (one time per record to write the size to the
  42. * buffer and reserve the space for the data)
  43. * - dasd_eer_write_buffer (one or more times per record to write the data)
  44. * The data can be written in several steps but you will have to compute
  45. * the total size up front for the invocation of dasd_eer_start_record.
  46. * If the ringbuffer is full, dasd_eer_start_record will remove the required
  47. * number of old records.
  48. *
  49. * A record is typically read in two steps, first read the integer that
  50. * specifies the size of the following data, then read the data.
  51. * Both can be done by
  52. * - dasd_eer_read_buffer
  53. *
  54. * For all mentioned functions you need to get the bufferlock first and keep
  55. * it until a complete record is written or read.
  56. *
  57. * All information necessary to keep track of an internal buffer is kept in
  58. * a struct eerbuffer. The buffer specific to a file pointer is strored in
  59. * the private_data field of that file. To be able to write data to all
  60. * existing buffers, each buffer is also added to the bufferlist.
  61. * If the user does not want to read a complete record in one go, we have to
  62. * keep track of the rest of the record. residual stores the number of bytes
  63. * that are still to deliver. If the rest of the record is invalidated between
  64. * two reads then residual will be set to -1 so that the next read will fail.
  65. * All entries in the eerbuffer structure are protected with the bufferlock.
  66. * To avoid races between writing to a buffer on the one side and creating
  67. * and destroying buffers on the other side, the bufferlock must also be used
  68. * to protect the bufferlist.
  69. */
/* number of pages per internal ring buffer; writable via module sysfs */
static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);

/*
 * One ring buffer per open file descriptor. See the big comment above
 * for the interplay of head, tail and residual.
 */
struct eerbuffer {
	struct list_head list;	/* entry in bufferlist */
	char **buffer;		/* array of buffer_page_count page pointers */
	int buffersize;		/* total capacity: page count * PAGE_SIZE */
	int buffer_page_count;	/* number of pages in buffer[] */
	int head;		/* write position (byte offset) */
	int tail;		/* read position (byte offset) */
	int residual;		/* unread bytes of the record being read,
				 * or -1 if that record was invalidated */
};

/* all currently open buffers; writers deliver each record to every entry */
static LIST_HEAD(bufferlist);
/* protects bufferlist and the contents of every eerbuffer */
static DEFINE_SPINLOCK(bufferlock);
/* blocking readers sleep here until a record becomes available */
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
  84. /*
  85. * How many free bytes are available on the buffer.
  86. * Needs to be called with bufferlock held.
  87. */
  88. static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
  89. {
  90. if (eerb->head < eerb->tail)
  91. return eerb->tail - eerb->head - 1;
  92. return eerb->buffersize - eerb->head + eerb->tail -1;
  93. }
  94. /*
  95. * How many bytes of buffer space are used.
  96. * Needs to be called with bufferlock held.
  97. */
  98. static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
  99. {
  100. if (eerb->head >= eerb->tail)
  101. return eerb->head - eerb->tail;
  102. return eerb->buffersize - eerb->tail + eerb->head;
  103. }
  104. /*
  105. * The dasd_eer_write_buffer function just copies count bytes of data
  106. * to the buffer. Make sure to call dasd_eer_start_record first, to
  107. * make sure that enough free space is available.
  108. * Needs to be called with bufferlock held.
  109. */
  110. static void dasd_eer_write_buffer(struct eerbuffer *eerb,
  111. char *data, int count)
  112. {
  113. unsigned long headindex,localhead;
  114. unsigned long rest, len;
  115. char *nextdata;
  116. nextdata = data;
  117. rest = count;
  118. while (rest > 0) {
  119. headindex = eerb->head / PAGE_SIZE;
  120. localhead = eerb->head % PAGE_SIZE;
  121. len = min(rest, PAGE_SIZE - localhead);
  122. memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
  123. nextdata += len;
  124. rest -= len;
  125. eerb->head += len;
  126. if (eerb->head == eerb->buffersize)
  127. eerb->head = 0; /* wrap around */
  128. BUG_ON(eerb->head > eerb->buffersize);
  129. }
  130. }
  131. /*
  132. * Needs to be called with bufferlock held.
  133. */
  134. static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
  135. {
  136. unsigned long tailindex,localtail;
  137. unsigned long rest, len, finalcount;
  138. char *nextdata;
  139. finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
  140. nextdata = data;
  141. rest = finalcount;
  142. while (rest > 0) {
  143. tailindex = eerb->tail / PAGE_SIZE;
  144. localtail = eerb->tail % PAGE_SIZE;
  145. len = min(rest, PAGE_SIZE - localtail);
  146. memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
  147. nextdata += len;
  148. rest -= len;
  149. eerb->tail += len;
  150. if (eerb->tail == eerb->buffersize)
  151. eerb->tail = 0; /* wrap around */
  152. BUG_ON(eerb->tail > eerb->buffersize);
  153. }
  154. return finalcount;
  155. }
  156. /*
  157. * Whenever you want to write a blob of data to the internal buffer you
  158. * have to start by using this function first. It will write the number
  159. * of bytes that will be written to the buffer. If necessary it will remove
  160. * old records to make room for the new one.
  161. * Needs to be called with bufferlock held.
  162. */
  163. static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
  164. {
  165. int tailcount;
  166. if (count + sizeof(count) > eerb->buffersize)
  167. return -ENOMEM;
  168. while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
  169. if (eerb->residual > 0) {
  170. eerb->tail += eerb->residual;
  171. if (eerb->tail >= eerb->buffersize)
  172. eerb->tail -= eerb->buffersize;
  173. eerb->residual = -1;
  174. }
  175. dasd_eer_read_buffer(eerb, (char *) &tailcount,
  176. sizeof(tailcount));
  177. eerb->tail += tailcount;
  178. if (eerb->tail >= eerb->buffersize)
  179. eerb->tail -= eerb->buffersize;
  180. }
  181. dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count));
  182. return 0;
  183. };
  184. /*
  185. * Release pages that are not used anymore.
  186. */
  187. static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
  188. {
  189. int i;
  190. for (i = 0; i < no_pages; i++)
  191. free_page((unsigned long) buf[i]);
  192. }
  193. /*
  194. * Allocate a new set of memory pages.
  195. */
  196. static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
  197. {
  198. int i;
  199. for (i = 0; i < no_pages; i++) {
  200. buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
  201. if (!buf[i]) {
  202. dasd_eer_free_buffer_pages(buf, i);
  203. return -ENOMEM;
  204. }
  205. }
  206. return 0;
  207. }
  208. /*
  209. * SECTION: The extended error reporting functionality
  210. */
  211. /*
  212. * When a DASD device driver wants to report an error, it calls the
  213. * function dasd_eer_write and gives the respective trigger ID as
  214. * parameter. Currently there are four kinds of triggers:
  215. *
  216. * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
  217. * DASD_EER_PPRCSUSPEND: PPRC was suspended
  218. * DASD_EER_NOPATH: There is no path to the device left.
  219. * DASD_EER_STATECHANGE: The state of the device has changed.
  220. *
  221. * For the first three triggers all required information can be supplied by
  222. * the caller. For these triggers a record is written by the function
  223. * dasd_eer_write_standard_trigger.
  224. *
  225. * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
  226. * status ccw need to be executed to gather the necessary sense data first.
  227. * The dasd_eer_snss function will queue the SNSS request and the request
  228. * callback will then call dasd_eer_write with the DASD_EER_STATCHANGE
  229. * trigger.
  230. *
  231. * To avoid memory allocations at runtime, the necessary memory is allocated
  232. * when the extended error reporting is enabled for a device (by
  233. * dasd_eer_probe). There is one sense subsystem status request for each
  234. * eer enabled DASD device. The presence of the cqr in device->eer_cqr
  235. * indicates that eer is enable for the device. The use of the snss request
  236. * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
  237. * that the cqr is currently in use, dasd_eer_snss cannot start a second
  238. * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
  239. * the SNSS request will check the bit and call dasd_eer_snss again.
  240. */
/* number of sense subsystem status data bytes carried by one SNSS record */
#define SNSS_DATA_SIZE 44
/* size of the bus id field in a record header (filled from dev_name()) */
#define DASD_EER_BUSID_SIZE 10

/*
 * Header that precedes every record delivered to user space.
 * total_size covers this header, the optional sense/SNSS payload and
 * the trailing "EOR" marker (see the dasd_eer_write_*_trigger writers).
 * Packed so the layout seen by user space is unambiguous.
 */
struct dasd_eer_header {
	__u32 total_size;	/* complete record size in bytes */
	__u32 trigger;		/* DASD_EER_* trigger id */
	__u64 tv_sec;		/* event wall-clock time, seconds */
	__u64 tv_usec;		/* event wall-clock time, microseconds part */
	char busid[DASD_EER_BUSID_SIZE];	/* device bus id string */
} __attribute__ ((packed));
/*
 * The following function can be used for those triggers that have
 * all necessary data available when the function is called.
 * If the parameter cqr is not NULL, the chain of requests will be searched
 * for valid sense data, and all valid sense data sets will be added to
 * the triggers data.
 */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
					    struct dasd_ccw_req *cqr,
					    int trigger)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;
	char *sense;

	/* go through cqr chain and count the valid sense data sets */
	data_size = 0;
	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
		if (dasd_get_sense(&temp_cqr->irb))
			data_size += 32;	/* 32 bytes per sense set */

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strscpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	/* append the complete record to every open buffer */
	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
			sense = dasd_get_sense(&temp_cqr->irb);
			if (sense)
				dasd_eer_write_buffer(eerb, sense, 32);
		}
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	/* wake readers blocked in dasd_eer_read */
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}
  294. /*
  295. * This function writes a DASD_EER_STATECHANGE trigger.
  296. */
  297. static void dasd_eer_write_snss_trigger(struct dasd_device *device,
  298. struct dasd_ccw_req *cqr,
  299. int trigger)
  300. {
  301. int data_size;
  302. int snss_rc;
  303. struct timespec64 ts;
  304. struct dasd_eer_header header;
  305. unsigned long flags;
  306. struct eerbuffer *eerb;
  307. snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
  308. if (snss_rc)
  309. data_size = 0;
  310. else
  311. data_size = SNSS_DATA_SIZE;
  312. header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
  313. header.trigger = DASD_EER_STATECHANGE;
  314. ktime_get_real_ts64(&ts);
  315. header.tv_sec = ts.tv_sec;
  316. header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
  317. strscpy(header.busid, dev_name(&device->cdev->dev),
  318. DASD_EER_BUSID_SIZE);
  319. spin_lock_irqsave(&bufferlock, flags);
  320. list_for_each_entry(eerb, &bufferlist, list) {
  321. dasd_eer_start_record(eerb, header.total_size);
  322. dasd_eer_write_buffer(eerb, (char *) &header , sizeof(header));
  323. if (!snss_rc)
  324. dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
  325. dasd_eer_write_buffer(eerb, "EOR", 4);
  326. }
  327. spin_unlock_irqrestore(&bufferlock, flags);
  328. wake_up_interruptible(&dasd_eer_read_wait_queue);
  329. }
  330. /*
  331. * This function is called for all triggers. It calls the appropriate
  332. * function that writes the actual trigger records.
  333. */
  334. void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
  335. unsigned int id)
  336. {
  337. if (!device->eer_cqr)
  338. return;
  339. switch (id) {
  340. case DASD_EER_FATALERROR:
  341. case DASD_EER_PPRCSUSPEND:
  342. dasd_eer_write_standard_trigger(device, cqr, id);
  343. break;
  344. case DASD_EER_NOPATH:
  345. case DASD_EER_NOSPC:
  346. case DASD_EER_AUTOQUIESCE:
  347. dasd_eer_write_standard_trigger(device, NULL, id);
  348. break;
  349. case DASD_EER_STATECHANGE:
  350. dasd_eer_write_snss_trigger(device, cqr, id);
  351. break;
  352. default: /* unknown trigger, so we write it without any sense data */
  353. dasd_eer_write_standard_trigger(device, NULL, id);
  354. break;
  355. }
  356. }
  357. EXPORT_SYMBOL(dasd_eer_write);
/*
 * Start a sense subsystem status request.
 * Needs to be called with the device held.
 */
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr)	/* Device not eer enabled. */
		return;
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/* Sense subsystem status request in use. Remember the
		 * request so the callback (dasd_eer_snss_cb) restarts it. */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	/* cdev is already locked, can't use dasd_add_request_head */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	dasd_schedule_device_bh(device);
}
/*
 * Callback function for use with sense subsystem status request.
 * Writes the STATECHANGE record, then either re-arms the request (if
 * it is still the device's eer_cqr) or frees it (if eer was disabled,
 * or disabled and re-enabled, while the request was running).
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	/* deliver the (possibly failed) SNSS data to all open buffers */
	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		/* request is still owned by the device: mark it free */
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS has been requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;	/* do not free below */
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error recovery has been switched off while
		 * the SNSS request was running. It could even have
		 * been switched off and on again in which case there
		 * is a new ccw in device->eer_cqr. Free the "old"
		 * snss request now.
		 */
		dasd_sfree_request(cqr, device);
}
/*
 * Enable error reporting on a given device.
 * Allocates and prepares the SNSS request up front so no allocation is
 * needed when a state change interrupt arrives later; the prepared cqr
 * stored in device->eer_cqr also serves as the "eer enabled" flag.
 * Returns 0 on success (or if already enabled), -EMEDIUMTYPE for
 * non-ECKD devices, -EBUSY if the device is going offline, -ENOMEM on
 * allocation failure.
 */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr = NULL;
	unsigned long flags;
	struct ccw1 *ccw;
	int rc = 0;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr)
		goto out;	/* already enabled, nothing to do */
	else if (!device->discipline ||
		 strcmp(device->discipline->name, "ECKD"))
		rc = -EMEDIUMTYPE;	/* eer is ECKD only */
	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
		rc = -EBUSY;

	if (rc)
		goto out;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
				   SNSS_DATA_SIZE, device, NULL);
	if (IS_ERR(cqr)) {
		rc = -ENOMEM;
		cqr = NULL;
		goto out;
	}
	cqr->startdev = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);

	/* build the single SNSS channel command word */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
	ccw->count = SNSS_DATA_SIZE;
	ccw->flags = 0;
	ccw->cda = virt_to_dma32(cqr->data);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;	/* ownership transferred to the device */
	}
out:
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* free the request if it was not installed above */
	if (cqr)
		dasd_sfree_request(cqr, device);
	return rc;
}
/*
 * Disable error reporting on a given device.
 */
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;		/* not enabled, nothing to do */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;	/* marks eer as disabled */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* if the request is currently running, its callback will find
	 * device->eer_cqr != cqr and free it (see dasd_eer_snss_cb) */
	if (cqr && !in_use)
		dasd_sfree_request(cqr, device);
}
  474. /*
  475. * SECTION: the device operations
  476. */
  477. /*
  478. * On the one side we need a lock to access our internal buffer, on the
  479. * other side a copy_to_user can sleep. So we need to copy the data we have
  480. * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
  481. */
static char readbuffer[PAGE_SIZE];	/* bounce buffer for copy_to_user */
static DEFINE_MUTEX(readbuffer_mutex);	/* serializes use of readbuffer */
  484. static int dasd_eer_open(struct inode *inp, struct file *filp)
  485. {
  486. struct eerbuffer *eerb;
  487. unsigned long flags;
  488. eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
  489. if (!eerb)
  490. return -ENOMEM;
  491. eerb->buffer_page_count = eer_pages;
  492. if (eerb->buffer_page_count < 1 ||
  493. eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
  494. kfree(eerb);
  495. DBF_EVENT(DBF_WARNING, "can't open device since module "
  496. "parameter eer_pages is smaller than 1 or"
  497. " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
  498. return -EINVAL;
  499. }
  500. eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
  501. eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
  502. GFP_KERNEL);
  503. if (!eerb->buffer) {
  504. kfree(eerb);
  505. return -ENOMEM;
  506. }
  507. if (dasd_eer_allocate_buffer_pages(eerb->buffer,
  508. eerb->buffer_page_count)) {
  509. kfree(eerb->buffer);
  510. kfree(eerb);
  511. return -ENOMEM;
  512. }
  513. filp->private_data = eerb;
  514. spin_lock_irqsave(&bufferlock, flags);
  515. list_add(&eerb->list, &bufferlist);
  516. spin_unlock_irqrestore(&bufferlock, flags);
  517. return nonseekable_open(inp,filp);
  518. }
  519. static int dasd_eer_close(struct inode *inp, struct file *filp)
  520. {
  521. struct eerbuffer *eerb;
  522. unsigned long flags;
  523. eerb = (struct eerbuffer *) filp->private_data;
  524. spin_lock_irqsave(&bufferlock, flags);
  525. list_del(&eerb->list);
  526. spin_unlock_irqrestore(&bufferlock, flags);
  527. dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
  528. kfree(eerb->buffer);
  529. kfree(eerb);
  530. return 0;
  531. }
/*
 * Read one record (or the remainder of one) from this descriptor's
 * ring buffer. A record shorter than count is delivered whole; a
 * record longer than count is delivered in pieces across successive
 * reads, tracked via eerb->residual. Returns the number of bytes
 * delivered, -EIO if the partially read record was overwritten by a
 * writer in between, -EAGAIN for an empty buffer with O_NONBLOCK, or
 * -ERESTARTSYS/-EINTR codes from interruptible waits.
 */
static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	/* readbuffer is shared by all readers, take its mutex first */
	if (mutex_lock_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;
	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) { /* the remainder of this record */
				  /* has been deleted             */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		mutex_unlock(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		/* start of a new record: first read its size integer */
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				mutex_unlock(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				/* sleep until a writer adds a record */
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				/* reacquire both locks before retrying */
				if (mutex_lock_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
		eerb->residual = tailcount - effective_count;
	}

	/* stage the data in readbuffer: copy_to_user may sleep and must
	 * not be called while holding bufferlock */
	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);
	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		mutex_unlock(&readbuffer_mutex);
		return -EFAULT;
	}
	mutex_unlock(&readbuffer_mutex);
	return effective_count;
}
  588. static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
  589. {
  590. __poll_t mask;
  591. unsigned long flags;
  592. struct eerbuffer *eerb;
  593. eerb = (struct eerbuffer *) filp->private_data;
  594. poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
  595. spin_lock_irqsave(&bufferlock, flags);
  596. if (eerb->head != eerb->tail)
  597. mask = EPOLLIN | EPOLLRDNORM ;
  598. else
  599. mask = 0;
  600. spin_unlock_irqrestore(&bufferlock, flags);
  601. return mask;
  602. }
  603. static const struct file_operations dasd_eer_fops = {
  604. .open = &dasd_eer_open,
  605. .release = &dasd_eer_close,
  606. .read = &dasd_eer_read,
  607. .poll = &dasd_eer_poll,
  608. .owner = THIS_MODULE,
  609. .llseek = noop_llseek,
  610. };
  611. static struct miscdevice *dasd_eer_dev = NULL;
  612. int __init dasd_eer_init(void)
  613. {
  614. int rc;
  615. dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
  616. if (!dasd_eer_dev)
  617. return -ENOMEM;
  618. dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
  619. dasd_eer_dev->name = "dasd_eer";
  620. dasd_eer_dev->fops = &dasd_eer_fops;
  621. rc = misc_register(dasd_eer_dev);
  622. if (rc) {
  623. kfree(dasd_eer_dev);
  624. dasd_eer_dev = NULL;
  625. DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
  626. "register misc device");
  627. return rc;
  628. }
  629. return 0;
  630. }
  631. void dasd_eer_exit(void)
  632. {
  633. if (dasd_eer_dev) {
  634. misc_deregister(dasd_eer_dev);
  635. kfree(dasd_eer_dev);
  636. dasd_eer_dev = NULL;
  637. }
  638. }