virtio_scsi.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
#include <linux/blk-mq-virtio.h>

#include "sd.h"

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2

static unsigned int virtscsi_poll_queues;
module_param(virtscsi_poll_queues, uint, 0644);
MODULE_PARM_DESC(virtscsi_poll_queues,
		 "The number of dedicated virtqueues for polling I/O");

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp     cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event        evt;
	} resp;
} ____cacheline_aligned_in_smp;
struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;

	int io_queues[HCTX_MAX_TYPES];

	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (resid)
		scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}

/*
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		fallthrough;
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (resp->sense_len) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
	}

	scsi_done(sc);
}
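
/*
 * Drain a virtqueue and run @fn on every completed buffer.  Callbacks
 * are disabled while the ring is drained, and the loop repeats until
 * virtqueue_enable_cb() confirms that no new buffer arrived in the
 * meantime, so no completion can be lost to the disable/enable race.
 */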
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
{
	int i, num_vqs;

	num_vqs = vscsi->num_queues;
	for (i = 0; i < num_vqs; i++)
		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
				 virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete(cmd->comp);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static void virtscsi_handle_event(struct work_struct *work);
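
/*
 * Post one event buffer to the event virtqueue.  The host fills it in
 * asynchronously; virtscsi_handle_event() requeues the same node once
 * the event has been processed, so the VIRTIO_SCSI_EVENT_LEN buffers
 * keep circulating for the lifetime of the device.
 */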
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync. */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}
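
/*
 * Events carry the affected device address in the same single-level
 * LUN format used for requests: lun[0] is 1, lun[1] is the target ID,
 * and lun[2..3] hold the LUN number, which is how the bytes are
 * decoded below.
 */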
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		if (lun == 0) {
			scsi_scan_target(&shost->shost_gendev, 0, target,
					 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		} else {
			scsi_add_device(shost, 0, target, lun);
		}
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
			       shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
	}
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);
		return;
	}

	/* Handle "Parameters changed", "Mode parameters changed", and
	   "Capacity data has changed".  */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(sdev);

	scsi_device_put(sdev);
}

static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int result, inquiry_len, inq_result_len = 256;
	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);

	if (!inq_result)
		return -ENOMEM;

	shost_for_each_device(sdev, shost) {
		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;

		memset(scsi_cmd, 0, sizeof(scsi_cmd));
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) inquiry_len;

		memset(inq_result, 0, inq_result_len);

		result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
					  inq_result, inquiry_len,
					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);

		if (result == 0 && inq_result[0] >> 5) {
			/* PQ indicates the LUN is not attached */
			scsi_remove_device(sdev);
		} else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
			/*
			 * If all LUNs of a virtio-scsi device are unplugged
			 * it will respond with BAD TARGET on any INQUIRY
			 * command.
			 * Remove the device in this case as well.
			 */
			scsi_remove_device(sdev);
		}
	}

	kfree(inq_result);
	return 0;
}
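
/*
 * Workqueue handler for one event buffer.  A set
 * VIRTIO_SCSI_T_EVENTS_MISSED bit means the host dropped events
 * because no buffer was available, so the whole host is rescanned
 * instead of acting on a single event.
 */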
static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event &
	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
		int ret;

		event->event &= ~cpu_to_virtio32(vscsi->vdev,
						 VIRTIO_SCSI_T_EVENTS_MISSED);
		ret = virtscsi_rescan_hotunplug(vscsi);
		if (ret)
			return;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (!vscsi->stop_events)
		queue_work(system_freezable_wq, &event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}
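
/*
 * Build the scatterlist for one command.  Descriptor order follows the
 * virtio-scsi spec: request header, (write-direction protection data,)
 * data-out, then the device-writable response header, (read-direction
 * protection data,) data-in -- at most six entries, hence sgs[6].
 */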
static int __virtscsi_add_cmd(struct virtqueue *vq,
			      struct virtio_scsi_cmd *cmd,
			      size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &sc->sdb.table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &sc->sdb.table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
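
/*
 * The kick is split in two steps: virtqueue_kick_prepare() runs under
 * vq_lock, while the potentially expensive virtqueue_notify() exit to
 * the host happens after the lock has been dropped.
 */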
static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
{
	bool needs_kick;
	unsigned long flags;

	spin_lock_irqsave(&vq->vq_lock, flags);
	needs_kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
 * @vq		: the struct virtqueue we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 * @kick	: whether to kick the virtqueue immediately
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size,
			    bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}
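
/*
 * Fill the common request header.  The single-level LUN format is
 * lun[0] = 1, lun[1] = target, lun[2..3] = LUN with 0x40 set in the
 * high byte (flat addressing); the request tag is simply the
 * scsi_cmnd pointer, which virtscsi_abort() reuses to identify the
 * command.
 */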
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct blk_integrity *bi;

	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->q->disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif

static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	return &vscsi->req_vqs[hwq];
}

static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}
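
/*
 * Send a task management function on the control queue and sleep until
 * the host responds.  The caller passes in a zeroed, mempool-allocated
 * command, which is freed here on all paths.
 */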
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			     sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, scsi_done() will do nothing, because the
	 * command timed out and hence SCMD_STATE_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					   VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
	 * may have transfer limits which come from the host SCSI
	 * controller or something on the host side other than the
	 * target itself.
	 *
	 * To make this work properly, the hypervisor can adjust the
	 * target's VPD information to advertise these limits.  But
	 * for that to work, the guest has to look at the VPD pages,
	 * which we won't do by default if it is an SPC-2 device, even
	 * if it does actually support it.
	 *
	 * So, set the blist to always try to read the VPD pages.
	 */
	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;

	return 0;
}

/**
 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
 * @sdev: Virtscsi target whose queue depth to change
 * @qdepth: New queue depth
 */
static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth = shost->cmd_per_lun;

	return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
	};
	return virtscsi_tmf(vscsi, cmd);
}

static void virtscsi_map_queues(struct Scsi_Host *shost)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	int i, qoff;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = vscsi->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(map);
		else
			blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
	}
}

static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
	unsigned long flags;
	unsigned int len;
	int found = 0;
	void *buf;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);

	while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
		virtscsi_complete_cmd(vscsi, buf);
		found++;
	}

	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);

	return found;
}

static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	struct virtio_scsi *vscsi = shost_priv(shost);

	virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
}

/*
 * The host guarantees to respond to each command, although I/O
 * latencies might be higher than on bare metal.  Reset the timer
 * unconditionally to give the host a chance to perform EH.
 */
static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return SCSI_EH_RESET_TIMER;
}

static const struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.mq_poll = virtscsi_mq_poll,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.slave_alloc = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
};

#define virtscsi_config_get(vdev, fld) \
	({ \
		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while (0)

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}
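
/*
 * The virtqueue layout is fixed: vq 0 is the control queue, vq 1 the
 * event queue, and vqs 2..n the request queues (VIRTIO_SCSI_VQ_BASE
 * is 2).  Poll queues are taken from the tail of the request queues
 * and get no completion callback.
 */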
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs, num_poll_vqs, num_req_vqs;
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_req_vqs = vscsi->num_queues;
	num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
	vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL);

	if (!vqs || !vqs_info) {
		err = -ENOMEM;
		goto out;
	}

	num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
			     num_req_vqs - 1);
	vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
	vscsi->io_queues[HCTX_TYPE_READ] = 0;
	vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
		 vscsi->io_queues[HCTX_TYPE_DEFAULT],
		 vscsi->io_queues[HCTX_TYPE_READ],
		 vscsi->io_queues[HCTX_TYPE_POLL]);

	vqs_info[0].callback = virtscsi_ctrl_done;
	vqs_info[0].name = "control";
	vqs_info[1].callback = virtscsi_event_done;
	vqs_info[1].name = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
		vqs_info[i].callback = virtscsi_req_done;
		vqs_info[i].name = "request";
	}

	for (; i < num_vqs; i++)
		vqs_info[i].name = "request_poll";

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	kfree(vqs_info);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	shost->nr_maps = 1;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	if (vscsi->io_queues[HCTX_TYPE_POLL])
		shost->nr_maps = HCTX_TYPE_POLL + 1;

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/* LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);
	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}

#ifdef CONFIG_PM_SLEEP
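/*
 * Suspend tears the virtqueues down completely; resume rebuilds them
 * via virtscsi_init() and re-arms the event buffers if hotplug was
 * negotiated.
 */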
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};

static int __init virtio_scsi_init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}

	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	mempool_destroy(virtscsi_cmd_pool);
	virtscsi_cmd_pool = NULL;
	kmem_cache_destroy(virtscsi_cmd_cache);
	virtscsi_cmd_cache = NULL;
	return ret;
}

static void __exit virtio_scsi_fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(virtio_scsi_init);
module_exit(virtio_scsi_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");