/* zfcp_qdio.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * zfcp device driver
  4. *
  5. * Setup and helper functions to access QDIO.
  6. *
  7. * Copyright IBM Corp. 2002, 2010
  8. */
  9. #define KMSG_COMPONENT "zfcp"
  10. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  11. #include <linux/slab.h>
  12. #include <linux/module.h>
  13. #include "zfcp_ext.h"
  14. #include "zfcp_qdio.h"
  15. static bool enable_multibuffer = true;
  16. module_param_named(datarouter, enable_multibuffer, bool, 0400);
  17. MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
  18. static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
  19. unsigned int qdio_err)
  20. {
  21. struct zfcp_adapter *adapter = qdio->adapter;
  22. dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
  23. if (qdio_err & QDIO_ERROR_SLSB_STATE) {
  24. zfcp_qdio_siosl(adapter);
  25. zfcp_erp_adapter_shutdown(adapter, 0, id);
  26. return;
  27. }
  28. zfcp_erp_adapter_reopen(adapter,
  29. ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
  30. ZFCP_STATUS_COMMON_ERP_FAILED, id);
  31. }
  32. static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
  33. {
  34. int i, sbal_idx;
  35. for (i = first; i < first + cnt; i++) {
  36. sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
  37. memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
  38. }
  39. }
  40. /* this needs to be called prior to updating the queue fill level */
  41. static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
  42. {
  43. unsigned long long now, span;
  44. int used;
  45. now = get_tod_clock_monotonic();
  46. span = (now - qdio->req_q_time) >> 12;
  47. used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
  48. qdio->req_q_util += used * span;
  49. qdio->req_q_time = now;
  50. }
  51. static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
  52. int queue_no, int idx, int count,
  53. unsigned long parm)
  54. {
  55. struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
  56. if (unlikely(qdio_err)) {
  57. zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
  58. return;
  59. }
  60. /* cleanup all SBALs being program-owned now */
  61. zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
  62. spin_lock_irq(&qdio->stat_lock);
  63. zfcp_qdio_account(qdio);
  64. spin_unlock_irq(&qdio->stat_lock);
  65. atomic_add(count, &qdio->req_q_free);
  66. wake_up(&qdio->req_q_wq);
  67. }
  68. static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
  69. int queue_no, int idx, int count,
  70. unsigned long parm)
  71. {
  72. struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
  73. struct zfcp_adapter *adapter = qdio->adapter;
  74. int sbal_no, sbal_idx;
  75. if (unlikely(qdio_err)) {
  76. if (zfcp_adapter_multi_buffer_active(adapter)) {
  77. void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
  78. struct qdio_buffer_element *sbale;
  79. u64 req_id;
  80. u8 scount;
  81. memset(pl, 0,
  82. ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
  83. sbale = qdio->res_q[idx]->element;
  84. req_id = (u64) sbale->addr;
  85. scount = min(sbale->scount + 1,
  86. ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
  87. /* incl. signaling SBAL */
  88. for (sbal_no = 0; sbal_no < scount; sbal_no++) {
  89. sbal_idx = (idx + sbal_no) %
  90. QDIO_MAX_BUFFERS_PER_Q;
  91. pl[sbal_no] = qdio->res_q[sbal_idx];
  92. }
  93. zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
  94. }
  95. zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
  96. return;
  97. }
  98. /*
  99. * go through all SBALs from input queue currently
  100. * returned by QDIO layer
  101. */
  102. for (sbal_no = 0; sbal_no < count; sbal_no++) {
  103. sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
  104. /* go through all SBALEs of SBAL */
  105. zfcp_fsf_reqid_check(qdio, sbal_idx);
  106. }
  107. /*
  108. * put SBALs back to response queue
  109. */
  110. if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
  111. zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
  112. }
/*
 * Close the current SBAL and chain a further SBAL to the request.
 *
 * Marks the current SBALE as the last entry of its SBAL; then, unless the
 * request already sits on its last allowed SBAL, flags the SBAL as
 * continued, advances to the next SBAL (wrapping at the queue end) and
 * returns that SBAL's first SBALE with the request's storage-block type
 * applied.  Returns NULL when no further SBAL may be used.
 */
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL (wraps at queue end) */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this requests number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}
  139. static struct qdio_buffer_element *
  140. zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
  141. {
  142. if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
  143. return zfcp_qdio_sbal_chain(qdio, q_req);
  144. q_req->sbale_curr++;
  145. return zfcp_qdio_sbale_curr(qdio, q_req);
  146. }
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 *
 * Maps each scatter-gather entry to one SBALE, chaining further SBALs to
 * the request as needed.  If the request would exceed its allowed SBAL
 * range, the SBALs filled so far are zeroed again and -EINVAL is returned.
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			/* out of SBALs: record the event and undo our fills */
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;
	}
	return 0;
}
  175. static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
  176. {
  177. if (atomic_read(&qdio->req_q_free) ||
  178. !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
  179. return 1;
  180. return 0;
  181. }
/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	/* releases req_q_lock while sleeping; five-second timeout */
	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	/* QDIO may have gone down while we slept; re-check before reporting */
	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	/* ret > 0: condition became true within the timeout */
	if (ret > 0)
		return 0;

	/* ret == 0: timed out without a free SBAL */
	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	/* timed out or interrupted (ret < 0) */
	return -EIO;
}
  208. /**
  209. * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
  210. * @qdio: pointer to struct zfcp_qdio
  211. * @q_req: pointer to struct zfcp_qdio_req
  212. * Returns: 0 on success, error otherwise
  213. */
  214. int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
  215. {
  216. int retval;
  217. u8 sbal_number = q_req->sbal_number;
  218. spin_lock(&qdio->stat_lock);
  219. zfcp_qdio_account(qdio);
  220. spin_unlock(&qdio->stat_lock);
  221. retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
  222. q_req->sbal_first, sbal_number);
  223. if (unlikely(retval)) {
  224. zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
  225. sbal_number);
  226. return retval;
  227. }
  228. /* account for transferred buffers */
  229. atomic_sub(sbal_number, &qdio->req_q_free);
  230. qdio->req_q_idx += sbal_number;
  231. qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
  232. return 0;
  233. }
  234. static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
  235. struct zfcp_qdio *qdio)
  236. {
  237. memset(id, 0, sizeof(*id));
  238. id->cdev = qdio->adapter->ccw_device;
  239. id->q_format = QDIO_ZFCP_QFMT;
  240. memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
  241. ASCEBC(id->adapter_name, 8);
  242. id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
  243. if (enable_multibuffer)
  244. id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
  245. id->no_input_qs = 1;
  246. id->no_output_qs = 1;
  247. id->input_handler = zfcp_qdio_int_resp;
  248. id->output_handler = zfcp_qdio_int_req;
  249. id->int_parm = (unsigned long) qdio;
  250. id->input_sbal_addr_array = (void **) (qdio->res_q);
  251. id->output_sbal_addr_array = (void **) (qdio->req_q);
  252. id->scan_threshold =
  253. QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
  254. }
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	struct qdio_initialize init_data;
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	zfcp_qdio_setup_init_data(&init_data, qdio);
	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(&init_data);
	if (ret)
		goto free_res_q;

	return 0;

	/* unwind in reverse order of allocation */
free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	/* wake anyone sleeping in zfcp_qdio_sbal_get() */
	wake_up(&qdio->req_q_wq);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		/* zero the still-outstanding SBALs (index wraps at queue end) */
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}

	/* reset request queue bookkeeping */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	/* allow a fresh hardware-log trigger for this new QDIO instance */
	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	/* query subchannel capabilities (data div / multi buffer) */
	if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		/* one SBALE fewer usable per SBAL without multibuffer mode;
		 * see zfcp_qdio_sbale_next, which chains at this limit
		 */
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;

	if (qdio_activate(cdev))
		goto failed_qdio;

	/* prime every response-queue SBAL with a single empty entry */
	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = NULL;
	}

	/* hand all response buffers over to the adapter */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	if (adapter->scsi_host) {
		/* propagate the new transfer limits to the SCSI host */
		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
	}

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
  371. void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
  372. {
  373. if (!qdio)
  374. return;
  375. if (qdio->adapter->ccw_device)
  376. qdio_free(qdio->adapter->ccw_device);
  377. qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
  378. qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
  379. kfree(qdio);
  380. }
  381. int zfcp_qdio_setup(struct zfcp_adapter *adapter)
  382. {
  383. struct zfcp_qdio *qdio;
  384. qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
  385. if (!qdio)
  386. return -ENOMEM;
  387. qdio->adapter = adapter;
  388. if (zfcp_qdio_allocate(qdio)) {
  389. kfree(qdio);
  390. return -ENOMEM;
  391. }
  392. spin_lock_init(&qdio->req_q_lock);
  393. spin_lock_init(&qdio->stat_lock);
  394. adapter->qdio = qdio;
  395. return 0;
  396. }
  397. /**
  398. * zfcp_qdio_siosl - Trigger logging in FCP channel
  399. * @adapter: The zfcp_adapter where to trigger logging
  400. *
  401. * Call the cio siosl function to trigger hardware logging. This
  402. * wrapper function sets a flag to ensure hardware logging is only
  403. * triggered once before going through qdio shutdown.
  404. *
  405. * The triggers are always run from qdio tasklet context, so no
  406. * additional synchronization is necessary.
  407. */
  408. void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
  409. {
  410. int rc;
  411. if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
  412. return;
  413. rc = ccw_device_siosl(adapter->ccw_device);
  414. if (!rc)
  415. atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
  416. &adapter->status);
  417. }