  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * zfcp device driver
  4. *
  5. * Setup and helper functions to access QDIO.
  6. *
  7. * Copyright IBM Corp. 2002, 2020
  8. */
  9. #define KMSG_COMPONENT "zfcp"
  10. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  11. #include <linux/lockdep.h>
  12. #include <linux/slab.h>
  13. #include <linux/module.h>
  14. #include "zfcp_ext.h"
  15. #include "zfcp_qdio.h"
  16. static bool enable_multibuffer = true;
  17. module_param_named(datarouter, enable_multibuffer, bool, 0400);
  18. MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
  19. #define ZFCP_QDIO_REQUEST_RESCAN_MSECS (MSEC_PER_SEC * 10)
  20. #define ZFCP_QDIO_REQUEST_SCAN_MSECS MSEC_PER_SEC
  21. static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
  22. unsigned int qdio_err)
  23. {
  24. struct zfcp_adapter *adapter = qdio->adapter;
  25. dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
  26. if (qdio_err & QDIO_ERROR_SLSB_STATE) {
  27. zfcp_qdio_siosl(adapter);
  28. zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
  29. return;
  30. }
  31. zfcp_erp_adapter_reopen(adapter,
  32. ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
  33. ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
  34. }
  35. static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
  36. {
  37. int i, sbal_idx;
  38. for (i = first; i < first + cnt; i++) {
  39. sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
  40. memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
  41. }
  42. }
  43. /* this needs to be called prior to updating the queue fill level */
  44. static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
  45. {
  46. unsigned long long now, span;
  47. int used;
  48. now = get_tod_clock_monotonic();
  49. span = (now - qdio->req_q_time) >> 12;
  50. used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
  51. qdio->req_q_util += used * span;
  52. qdio->req_q_time = now;
  53. }
  54. static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
  55. int queue_no, int idx, int count,
  56. unsigned long parm)
  57. {
  58. struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
  59. zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
  60. }
/*
 * Tasklet that reaps completed Request Queue (output queue) buffers,
 * returns them to the free pool, and wakes waiters in zfcp_qdio_sbal_get().
 * Re-arms the rescan timer while buffers remain outstanding.
 */
static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
	if (completed > 0) {
		if (error) {
			zfcp_qdio_handler_error(qdio, "qdreqt1", error);
		} else {
			/* cleanup all SBALs being program-owned now */
			zfcp_qdio_zero_sbals(qdio->req_q, start, completed);

			/* account must run before the fill level changes
			 * (see zfcp_qdio_account) */
			spin_lock_irq(&qdio->stat_lock);
			zfcp_qdio_account(qdio);
			spin_unlock_irq(&qdio->stat_lock);
			atomic_add(completed, &qdio->req_q_free);
			wake_up(&qdio->req_q_wq);
		}
	}

	/* buffers still in flight: make sure another scan happens even if
	 * no further interrupt arrives */
	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}
/* Timer callback: defer Request Queue completion scanning to the tasklet. */
static void zfcp_qdio_request_timer(struct timer_list *timer)
{
	struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);

	tasklet_schedule(&qdio->request_tasklet);
}
/*
 * Response Queue (input queue) completion handler.  On error, optionally
 * collects the SBALs of the affected multibuffer request for debug tracing,
 * then starts adapter recovery.  On success, processes each returned SBAL
 * and hands the buffers back to QDIO.
 */
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0,
			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
			/* first element of the first SBAL carries the
			 * request id of the failed request */
			sbale = qdio->res_q[idx]->element;
			req_id = dma64_to_u64(sbale->addr);
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			/* gather pointers to all SBALs of this request,
			 * wrapping around the ring */
			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
/*
 * Tasklet scheduled from zfcp_qdio_poll(): drains the Response Queue and,
 * while Request Queue buffers are outstanding, also kicks the request
 * tasklet.  Re-schedules itself if qdio_start_irq() reports more work.
 */
static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		tasklet_schedule(&qdio->request_tasklet);

	/* Check the Response Queue: */
	completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
	if (completed < 0)
		return;
	if (completed > 0)
		zfcp_qdio_int_resp(cdev, error, 0, start, completed,
				   (unsigned long) qdio);

	if (qdio_start_irq(cdev))
		/* More work pending: */
		tasklet_schedule(&qdio->irq_tasklet);
}
  154. static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
  155. {
  156. struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;
  157. tasklet_schedule(&qdio->irq_tasklet);
  158. }
/*
 * Terminate the current SBAL of @q_req and chain the next one.
 * Returns the first SBALE of the new SBAL, or NULL when the request
 * already occupies its last allowed SBAL (sbal_limit).
 */
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this requests number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}
  185. static struct qdio_buffer_element *
  186. zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
  187. {
  188. if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
  189. return zfcp_qdio_sbal_chain(qdio, q_req);
  190. q_req->sbale_curr++;
  191. return zfcp_qdio_sbale_curr(qdio, q_req);
  192. }
  193. /**
  194. * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
  195. * @qdio: pointer to struct zfcp_qdio
  196. * @q_req: pointer to struct zfcp_qdio_req
  197. * @sg: scatter-gather list
  198. * Returns: zero or -EINVAL on error
  199. */
  200. int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
  201. struct scatterlist *sg)
  202. {
  203. struct qdio_buffer_element *sbale;
  204. /* set storage-block type for this request */
  205. sbale = zfcp_qdio_sbale_req(qdio, q_req);
  206. sbale->sflags |= q_req->sbtype;
  207. for (; sg; sg = sg_next(sg)) {
  208. sbale = zfcp_qdio_sbale_next(qdio, q_req);
  209. if (!sbale) {
  210. atomic_inc(&qdio->req_q_full);
  211. zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
  212. q_req->sbal_number);
  213. return -EINVAL;
  214. }
  215. sbale->addr = u64_to_dma64(sg_phys(sg));
  216. sbale->length = sg->length;
  217. }
  218. return 0;
  219. }
  220. static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
  221. {
  222. if (atomic_read(&qdio->req_q_free) ||
  223. !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
  224. return 1;
  225. return 0;
  226. }
/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	/* ret > 0: condition true within timeout; ret == 0: 5s timeout
	 * elapsed; ret < 0: interrupted by a signal.  The lock is dropped
	 * while sleeping and re-acquired before returning. */
	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	/* QDIO may have gone down while we slept; a wakeup is no use then */
	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}
/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/*
	 * This should actually be a spin_lock_bh(stat_lock), to protect against
	 * Request Queue completion processing in tasklet context.
	 * But we can't do so (and are safe), as we always get called with IRQs
	 * disabled by spin_lock_irq[save](req_q_lock).
	 */
	lockdep_assert_irqs_disabled();
	spin_lock(&qdio->stat_lock);
	/* account before changing the fill level (see zfcp_qdio_account) */
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
					       q_req->sbal_first, sbal_number,
					       NULL);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	/* few free SBALs left: reap completions soon via the tasklet,
	 * otherwise just make sure the timer fires within SCAN_MSECS */
	if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
		tasklet_schedule(&qdio->request_tasklet);
	else
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}
  294. /**
  295. * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
  296. * @qdio: pointer to struct zfcp_qdio
  297. * Returns: -ENOMEM on memory allocation error or return value from
  298. * qdio_allocate
  299. */
  300. static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
  301. {
  302. int ret;
  303. ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
  304. if (ret)
  305. return -ENOMEM;
  306. ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
  307. if (ret)
  308. goto free_req_q;
  309. init_waitqueue_head(&qdio->req_q_wq);
  310. ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
  311. if (ret)
  312. goto free_res_q;
  313. return 0;
  314. free_res_q:
  315. qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
  316. free_req_q:
  317. qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
  318. return ret;
  319. }
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/*
	 * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
	 * during qdio_shutdown().
	 */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	/* release any zfcp_qdio_sbal_get() waiters (QDIOUP is now clear) */
	wake_up(&qdio->req_q_wq);

	/* quiesce deferred work before tearing QDIO down */
	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);
	del_timer_sync(&qdio->request_timer);
	qdio_stop_irq(adapter->ccw_device);
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}
  353. void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
  354. const struct zfcp_qdio *const qdio)
  355. {
  356. struct Scsi_Host *const shost = adapter->scsi_host;
  357. if (shost == NULL)
  358. return;
  359. shost->sg_tablesize = qdio->max_sbale_per_req;
  360. shost->max_sectors = qdio->max_sbale_per_req * 8;
  361. }
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	/* allow a fresh SIOSL on the next error */
	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	/* describe our queues and handlers to the QDIO layer */
	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.irq_poll = zfcp_qdio_poll;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	/* per-SBAL capacity depends on whether multibuffer mode is active */
	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;

	if (qdio_activate(cdev))
		goto failed_qdio;

	/* initialize every response-queue SBAL with a single empty SBALE */
	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* Enable processing for Request Queue completions: */
	tasklet_enable(&qdio->request_tasklet);
	/* Enable processing for QDIO interrupts: */
	tasklet_enable(&qdio->irq_tasklet);
	/* This results in a qdio_start_irq(): */
	tasklet_schedule(&qdio->irq_tasklet);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
  440. void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
  441. {
  442. if (!qdio)
  443. return;
  444. tasklet_kill(&qdio->irq_tasklet);
  445. tasklet_kill(&qdio->request_tasklet);
  446. if (qdio->adapter->ccw_device)
  447. qdio_free(qdio->adapter->ccw_device);
  448. qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
  449. qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
  450. kfree(qdio);
  451. }
  452. int zfcp_qdio_setup(struct zfcp_adapter *adapter)
  453. {
  454. struct zfcp_qdio *qdio;
  455. qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
  456. if (!qdio)
  457. return -ENOMEM;
  458. qdio->adapter = adapter;
  459. if (zfcp_qdio_allocate(qdio)) {
  460. kfree(qdio);
  461. return -ENOMEM;
  462. }
  463. spin_lock_init(&qdio->req_q_lock);
  464. spin_lock_init(&qdio->stat_lock);
  465. timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
  466. tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
  467. tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
  468. tasklet_disable(&qdio->irq_tasklet);
  469. tasklet_disable(&qdio->request_tasklet);
  470. adapter->qdio = qdio;
  471. return 0;
  472. }
  473. /**
  474. * zfcp_qdio_siosl - Trigger logging in FCP channel
  475. * @adapter: The zfcp_adapter where to trigger logging
  476. *
  477. * Call the cio siosl function to trigger hardware logging. This
  478. * wrapper function sets a flag to ensure hardware logging is only
  479. * triggered once before going through qdio shutdown.
  480. *
  481. * The triggers are always run from qdio tasklet context, so no
  482. * additional synchronization is necessary.
  483. */
  484. void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
  485. {
  486. int rc;
  487. if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
  488. return;
  489. rc = ccw_device_siosl(adapter->ccw_device);
  490. if (!rc)
  491. atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
  492. &adapter->status);
  493. }