virtio_blk.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. //#define DEBUG
  3. #include <linux/spinlock.h>
  4. #include <linux/slab.h>
  5. #include <linux/blkdev.h>
  6. #include <linux/hdreg.h>
  7. #include <linux/module.h>
  8. #include <linux/mutex.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/virtio.h>
  11. #include <linux/virtio_blk.h>
  12. #include <linux/scatterlist.h>
  13. #include <linux/string_helpers.h>
  14. #include <linux/idr.h>
  15. #include <linux/blk-mq.h>
  16. #include <linux/blk-mq-virtio.h>
  17. #include <linux/numa.h>
  18. #include <linux/vmalloc.h>
  19. #include <uapi/linux/virtio_ring.h>
/* Number of minor-number bits reserved per disk for its partitions. */
#define PART_BITS 4
/* Size of each per-virtqueue name buffer. */
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u
/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768
/*
 * Number of scatterlist entries embedded directly in struct virtblk_req;
 * zero when the architecture cannot chain scatterlists.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT 0
#else
#define VIRTIO_BLK_INLINE_SG_CNT 2
#endif

/* Module parameter: cap on the number of request virtqueues (0 = no cap). */
static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
		 "Limit the number of request queues to use for blk device. "
		 "0 for no limit. "
		 "Values > nr_cpu_ids truncated to nr_cpu_ids.");

/* Module parameter: number of virtqueues dedicated to polled I/O. */
static unsigned int poll_queues;
module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");

/* Dynamically allocated block device major number. */
static int major;
/* Allocator for per-disk indices (drives minor numbers/disk names). */
static DEFINE_IDA(vd_index_ida);
/* Workqueue used to run config-change work items in process context. */
static struct workqueue_struct *virtblk_wq;
/* Per-virtqueue state; cacheline-aligned to avoid false sharing. */
struct virtio_blk_vq {
	struct virtqueue *vq;
	/* Serializes add/kick/get operations on this virtqueue. */
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
/* Per-device driver state, stored in vdev->priv and disk->private_data. */
struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	/* Number of vqs assigned to each hctx type (default/read/poll). */
	int io_queues[HCTX_MAX_TYPES];
	struct virtio_blk_vq *vqs;

	/* For zoned device */
	unsigned int zone_sectors;
};
/* Per-request driver payload, allocated by blk-mq alongside each request. */
struct virtblk_req {
	/* Out header */
	struct virtio_blk_outhdr out_hdr;

	/* In header */
	union {
		u8 status;

		/*
		 * The zone append command has an extended in header.
		 * The status field in zone_append_in_hdr must always
		 * be the last byte.
		 */
		struct {
			__virtio64 sector;
			u8 status;
		} zone_append;
	} in_hdr;

	/* Actual in-header length used for this request (see above union). */
	size_t in_hdr_len;

	struct sg_table sg_table;
	/* Inline sg entries (VIRTIO_BLK_INLINE_SG_CNT); flexible array. */
	struct scatterlist sg[];
};
  93. static inline blk_status_t virtblk_result(u8 status)
  94. {
  95. switch (status) {
  96. case VIRTIO_BLK_S_OK:
  97. return BLK_STS_OK;
  98. case VIRTIO_BLK_S_UNSUPP:
  99. return BLK_STS_NOTSUPP;
  100. case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE:
  101. return BLK_STS_ZONE_OPEN_RESOURCE;
  102. case VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE:
  103. return BLK_STS_ZONE_ACTIVE_RESOURCE;
  104. case VIRTIO_BLK_S_IOERR:
  105. case VIRTIO_BLK_S_ZONE_UNALIGNED_WP:
  106. default:
  107. return BLK_STS_IOERR;
  108. }
  109. }
  110. static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
  111. {
  112. struct virtio_blk *vblk = hctx->queue->queuedata;
  113. struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
  114. return vq;
  115. }
/*
 * Queue a prepared request on @vq.  The descriptor order is mandated by
 * the virtio-blk spec: device-readable out header, then the data buffers
 * (device-readable for writes, device-writable for reads), then the
 * device-writable in header whose last byte is the status.
 * Returns 0 or a negative errno from virtqueue_add_sgs().
 */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
	struct scatterlist out_hdr, in_hdr, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &out_hdr;

	if (vbr->sg_table.nents) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = vbr->sg_table.sgl;
		else
			sgs[num_out + num_in++] = vbr->sg_table.sgl;
	}

	/* &vbr->in_hdr.status aliases the start of the in-header union. */
	sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
	sgs[num_out + num_in++] = &in_hdr;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
/*
 * Build the range-descriptor payload for DISCARD/WRITE_ZEROES/SECURE_ERASE
 * requests and attach it to @req as a special payload (freed later by
 * virtblk_cleanup_cmd()).  Returns 0 on success or -ENOMEM.
 */
static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	/* GFP_ATOMIC: may be called from the blk-mq submission path. */
	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and block layer only runs contiguity merge like
	 * normal RW request. So we can't reply on bio for retrieving
	 * each range info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		/* One range descriptor per bio in the (merged) request. */
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}
  170. static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
  171. {
  172. if (blk_rq_nr_phys_segments(req))
  173. sg_free_table_chained(&vbr->sg_table,
  174. VIRTIO_BLK_INLINE_SG_CNT);
  175. }
/*
 * Map the request's data pages into vbr->sg_table.  Returns the number of
 * mapped sg entries (0 for requests that carry no data) or -ENOMEM.
 */
static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
			    struct virtblk_req *vbr)
{
	int err;

	if (!blk_rq_nr_phys_segments(req))
		return 0;

	/* Use the inline sg entries first, chaining extra pages as needed. */
	vbr->sg_table.sgl = vbr->sg;
	err = sg_alloc_table_chained(&vbr->sg_table,
				     blk_rq_nr_phys_segments(req),
				     vbr->sg_table.sgl,
				     VIRTIO_BLK_INLINE_SG_CNT);
	if (unlikely(err))
		return -ENOMEM;

	return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}
  191. static void virtblk_cleanup_cmd(struct request *req)
  192. {
  193. if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
  194. kfree(bvec_virt(&req->special_vec));
  195. }
/*
 * Translate a block-layer request into the virtio-blk out header and,
 * for discard-like operations, attach the range payload.  Returns
 * BLK_STS_OK, BLK_STS_NOTSUPP for unsupported ops, BLK_STS_RESOURCE on
 * payload allocation failure, or BLK_STS_IOERR for unknown ops.
 */
static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
				      struct request *req,
				      struct virtblk_req *vbr)
{
	/* Plain status byte unless the op uses an extended in-header. */
	size_t in_hdr_len = sizeof(vbr->in_hdr.status);
	bool unmap = false;
	u32 type;
	u64 sector = 0;

	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
		return BLK_STS_NOTSUPP;

	/* Set fields for all request types */
	vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

	switch (req_op(req)) {
	case REQ_OP_READ:
		type = VIRTIO_BLK_T_IN;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_WRITE:
		type = VIRTIO_BLK_T_OUT;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		/* REQ_NOUNMAP means the zeroed range must stay allocated. */
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_SECURE_ERASE:
		type = VIRTIO_BLK_T_SECURE_ERASE;
		break;
	case REQ_OP_ZONE_OPEN:
		type = VIRTIO_BLK_T_ZONE_OPEN;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_CLOSE:
		type = VIRTIO_BLK_T_ZONE_CLOSE;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_FINISH:
		type = VIRTIO_BLK_T_ZONE_FINISH;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_APPEND:
		type = VIRTIO_BLK_T_ZONE_APPEND;
		sector = blk_rq_pos(req);
		/* Device reports the actually-written sector in the in-header. */
		in_hdr_len = sizeof(vbr->in_hdr.zone_append);
		break;
	case REQ_OP_ZONE_RESET:
		type = VIRTIO_BLK_T_ZONE_RESET;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_RESET_ALL:
		type = VIRTIO_BLK_T_ZONE_RESET_ALL;
		break;
	case REQ_OP_DRV_IN:
		/*
		 * Out header has already been prepared by the caller (virtblk_get_id()
		 * or virtblk_submit_zone_report()), nothing to do here.
		 */
		return 0;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	/* Set fields for non-REQ_OP_DRV_IN request types */
	vbr->in_hdr_len = in_hdr_len;
	vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
	vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector);

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
	    type == VIRTIO_BLK_T_SECURE_ERASE) {
		if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
			return BLK_STS_RESOURCE;
	}

	return 0;
}
/*
 * The status byte is always the last byte of the virtblk request
 * in-header. This helper fetches its value for all in-header formats
 * that are currently defined.
 */
static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
{
	return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
}
/*
 * blk-mq completion handler: unmap data, free any special payload and
 * finish the request with the device-reported status.
 */
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;

	virtblk_unmap_data(req, vbr);
	virtblk_cleanup_cmd(req);

	/*
	 * Zone append reports where the data actually landed; propagate
	 * that sector back to the block layer.
	 */
	if (req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = virtio64_to_cpu(vblk->vdev,
						vbr->in_hdr.zone_append.sector);

	blk_mq_end_request(req, status);
}
/*
 * Virtqueue interrupt callback: drain completed buffers and complete the
 * corresponding requests.  The disable_cb/get_buf/enable_cb loop closes
 * the race where the device adds a buffer just as callbacks are re-armed.
 */
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
/*
 * blk-mq ->commit_rqs: kick the virtqueue once after a batch of
 * ->queue_rq calls that deferred notification (bd->last == false).
 */
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	/* Notify outside the lock to keep the critical section short. */
	if (kick)
		virtqueue_notify(vq->vq);
}
  330. static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
  331. {
  332. virtblk_cleanup_cmd(req);
  333. switch (rc) {
  334. case -ENOSPC:
  335. return BLK_STS_DEV_RESOURCE;
  336. case -ENOMEM:
  337. return BLK_STS_RESOURCE;
  338. default:
  339. return BLK_STS_IOERR;
  340. }
  341. }
/*
 * Prepare @req for submission: build the virtio header, map the data and
 * mark the request started.  Returns BLK_STS_OK or an error status (in
 * which case any command setup has already been cleaned up).
 */
static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
				    struct virtio_blk *vblk,
				    struct request *req,
				    struct virtblk_req *vbr)
{
	blk_status_t status;
	int num;

	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
	if (unlikely(status))
		return status;

	num = virtblk_map_data(hctx, req, vbr);
	if (unlikely(num < 0))
		/* Mapping failure is always an allocation failure. */
		return virtblk_fail_to_queue(req, -ENOMEM);
	vbr->sg_table.nents = num;

	blk_mq_start_request(req);

	return BLK_STS_OK;
}
/*
 * blk-mq ->queue_rq: prepare the request and add it to the hctx's
 * virtqueue.  On a full ring (-ENOSPC) the hw queue is stopped; it is
 * restarted from virtblk_done() once completions free up descriptors.
 */
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int qid = hctx->queue_num;
	bool notify = false;
	blk_status_t status;
	int err;

	status = virtblk_prep_rq(hctx, vblk, req, vbr);
	if (unlikely(status))
		return status;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
	if (err) {
		/* Flush anything already queued so completions can drain. */
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		virtblk_unmap_data(req, vbr);
		return virtblk_fail_to_queue(req, err);
	}

	/* Only kick on the last request of a plug batch. */
	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	/* Notify after dropping the lock to minimize hold time. */
	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}
  393. static bool virtblk_prep_rq_batch(struct request *req)
  394. {
  395. struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
  396. struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
  397. return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
  398. }
/*
 * Add every prepared request on @rqlist to @vq under a single lock
 * acquisition, requeueing any that fail, then kick the queue once.
 */
static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
				  struct rq_list *rqlist)
{
	struct request *req;
	unsigned long flags;
	bool kick;

	spin_lock_irqsave(&vq->lock, flags);

	while ((req = rq_list_pop(rqlist))) {
		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
		int err;

		err = virtblk_add_req(vq->vq, vbr);
		if (err) {
			/* Undo prep and let blk-mq retry the request later. */
			virtblk_unmap_data(req, vbr);
			virtblk_cleanup_cmd(req);
			blk_mq_requeue_request(req, true);
		}
	}

	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->lock, flags);

	if (kick)
		virtqueue_notify(vq->vq);
}
/*
 * blk-mq ->queue_rqs: submit a plugged list of requests, batching
 * consecutive requests that target the same virtqueue.  Requests that
 * fail preparation are handed back to the caller via *rqlist.
 */
static void virtio_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list submit_list = { };
	struct rq_list requeue_list = { };
	struct virtio_blk_vq *vq = NULL;
	struct request *req;

	while ((req = rq_list_pop(rqlist))) {
		struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);

		/* Flush the batch whenever the target virtqueue changes. */
		if (vq && vq != this_vq)
			virtblk_add_req_batch(vq, &submit_list);
		vq = this_vq;

		if (virtblk_prep_rq_batch(req))
			rq_list_add_tail(&submit_list, req);
		else
			rq_list_add_tail(&requeue_list, req);
	}

	if (vq)
		virtblk_add_req_batch(vq, &submit_list);
	*rqlist = requeue_list;
}
#ifdef CONFIG_BLK_DEV_ZONED
/*
 * Allocate a buffer for a zone report of up to @nr_zones zones, clamped
 * to the queue's transfer limits.  Retries with progressively smaller
 * sizes on allocation failure.  On success returns the buffer (to be
 * freed with kvfree()) and stores its size in *buflen; returns NULL on
 * failure.
 */
static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
					 unsigned int nr_zones,
					 size_t *buflen)
{
	struct request_queue *q = vblk->disk->queue;
	size_t bufsize;
	void *buf;

	/* No point reporting more zones than the device has. */
	nr_zones = min_t(unsigned int, nr_zones,
			 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));

	bufsize = sizeof(struct virtio_blk_zone_report) +
		nr_zones * sizeof(struct virtio_blk_zone_descriptor);
	/* Keep the buffer within what one request can transfer. */
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);
	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

	while (bufsize >= sizeof(struct virtio_blk_zone_report)) {
		buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize >>= 1;
	}

	return NULL;
}
/*
 * Issue a synchronous VIRTIO_BLK_T_ZONE_REPORT for zones starting at
 * @sector, filling @report_buf.  Returns 0 or a negative errno.
 */
static int virtblk_submit_zone_report(struct virtio_blk *vblk,
				      char *report_buf, size_t report_len,
				      sector_t sector)
{
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	struct virtblk_req *vbr;
	int err;

	/* REQ_OP_DRV_IN: the out header is filled in here, not by setup_cmd. */
	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	vbr = blk_mq_rq_to_pdu(req);
	vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
	vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);

	err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
	blk_mq_free_request(req);
	return err;
}
/*
 * Convert one device-reported zone descriptor into a struct blk_zone and
 * feed it to the report_zones callback.  Returns the callback's result,
 * or -EIO for an invalid zone type/state.
 */
static int virtblk_parse_zone(struct virtio_blk *vblk,
			      struct virtio_blk_zone_descriptor *entry,
			      unsigned int idx, report_zones_cb cb, void *data)
{
	struct blk_zone zone = { };

	zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
	/* The last zone may be truncated by the device capacity. */
	if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
		zone.len = vblk->zone_sectors;
	else
		zone.len = get_capacity(vblk->disk) - zone.start;
	zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
	zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);

	switch (entry->z_type) {
	case VIRTIO_BLK_ZT_SWR:
		zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		break;
	case VIRTIO_BLK_ZT_SWP:
		zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
		break;
	case VIRTIO_BLK_ZT_CONV:
		zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
		break;
	default:
		dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
			zone.start, entry->z_type);
		return -EIO;
	}

	switch (entry->z_state) {
	case VIRTIO_BLK_ZS_EMPTY:
		zone.cond = BLK_ZONE_COND_EMPTY;
		break;
	case VIRTIO_BLK_ZS_CLOSED:
		zone.cond = BLK_ZONE_COND_CLOSED;
		break;
	case VIRTIO_BLK_ZS_FULL:
		zone.cond = BLK_ZONE_COND_FULL;
		/* A full zone's write pointer is pinned to its end. */
		zone.wp = zone.start + zone.len;
		break;
	case VIRTIO_BLK_ZS_EOPEN:
		zone.cond = BLK_ZONE_COND_EXP_OPEN;
		break;
	case VIRTIO_BLK_ZS_IOPEN:
		zone.cond = BLK_ZONE_COND_IMP_OPEN;
		break;
	case VIRTIO_BLK_ZS_NOT_WP:
		zone.cond = BLK_ZONE_COND_NOT_WP;
		break;
	case VIRTIO_BLK_ZS_RDONLY:
		zone.cond = BLK_ZONE_COND_READONLY;
		/* No meaningful write pointer for read-only zones. */
		zone.wp = ULONG_MAX;
		break;
	case VIRTIO_BLK_ZS_OFFLINE:
		zone.cond = BLK_ZONE_COND_OFFLINE;
		zone.wp = ULONG_MAX;
		break;
	default:
		dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
			zone.start, entry->z_state);
		return -EIO;
	}

	/*
	 * The callback below checks the validity of the reported
	 * entry data, no need to further validate it here.
	 */
	return cb(&zone, idx, data);
}
/*
 * block_device_operations ->report_zones: iterate zone reports from the
 * device, parsing up to @nr_zones zones starting at @sector.  Returns
 * the number of zones reported or a negative errno.  Holds vdev_mutex
 * so the device cannot be torn down mid-report.
 */
static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
				unsigned int nr_zones, report_zones_cb cb,
				void *data)
{
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_blk_zone_report *report;
	unsigned long long nz, i;
	size_t buflen;
	unsigned int zone_idx = 0;
	int ret;

	if (WARN_ON_ONCE(!vblk->zone_sectors))
		return -EOPNOTSUPP;

	report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
	if (!report)
		return -ENOMEM;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		/* Device was removed under us. */
		ret = -ENXIO;
		goto fail_report;
	}

	while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
		memset(report, 0, buflen);

		ret = virtblk_submit_zone_report(vblk, (char *)report,
						 buflen, sector);
		if (ret)
			goto fail_report;

		nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
			   nr_zones);
		if (!nz)
			break;

		for (i = 0; i < nz && zone_idx < nr_zones; i++) {
			ret = virtblk_parse_zone(vblk, &report->zones[i],
						 zone_idx, cb, data);
			if (ret)
				goto fail_report;

			/* Advance to the zone after the one just parsed. */
			sector = virtio64_to_cpu(vblk->vdev,
						 report->zones[i].z_start) +
				 vblk->zone_sectors;
			zone_idx++;
		}
	}

	if (zone_idx > 0)
		ret = zone_idx;
	else
		ret = -EINVAL;

fail_report:
	mutex_unlock(&vblk->vdev_mutex);
	kvfree(report);
	return ret;
}
/*
 * Read the zoned-device fields from the virtio config space and apply
 * them to the queue limits.  Returns 0 on success or -ENODEV when the
 * device reports values this driver cannot work with.
 */
static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
				     struct queue_limits *lim)
{
	struct virtio_device *vdev = vblk->vdev;
	u32 v, wg;

	dev_dbg(&vdev->dev, "probing host-managed zoned device\n");

	lim->features |= BLK_FEAT_ZONED;

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_open_zones, &v);
	lim->max_open_zones = v;
	dev_dbg(&vdev->dev, "max open zones = %u\n", v);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_active_zones, &v);
	lim->max_active_zones = v;
	dev_dbg(&vdev->dev, "max active zones = %u\n", v);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.write_granularity, &wg);
	if (!wg) {
		dev_warn(&vdev->dev, "zero write granularity reported\n");
		return -ENODEV;
	}
	lim->physical_block_size = wg;
	lim->io_min = wg;

	dev_dbg(&vdev->dev, "write granularity = %u\n", wg);

	/*
	 * virtio ZBD specification doesn't require zones to be a power of
	 * two sectors in size, but the code in this driver expects that.
	 */
	virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
		     &vblk->zone_sectors);
	if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
		dev_err(&vdev->dev,
			"zoned device with non power of two zone size %u\n",
			vblk->zone_sectors);
		return -ENODEV;
	}
	lim->chunk_sectors = vblk->zone_sectors;
	dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		/* Discard is not defined for host-managed zoned devices. */
		dev_warn(&vblk->vdev->dev,
			 "ignoring negotiated F_DISCARD for zoned device\n");
		lim->max_hw_discard_sectors = 0;
	}

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_append_sectors, &v);
	if (!v) {
		dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
		return -ENODEV;
	}
	/* An append smaller than the write granularity would be unusable. */
	if ((v << SECTOR_SHIFT) < wg) {
		dev_err(&vdev->dev,
			"write granularity %u exceeds max_append_sectors %u limit\n",
			wg, v);
		return -ENODEV;
	}
	lim->max_zone_append_sectors = v;
	dev_dbg(&vdev->dev, "max append sectors = %u\n", v);

	return 0;
}
#else
/*
 * Zoned block device support is not configured in this kernel, host-managed
 * zoned devices can't be supported.
 */
#define virtblk_report_zones NULL
static inline int virtblk_read_zoned_limits(struct virtio_blk *vblk,
					    struct queue_limits *lim)
{
	/* Refuse to probe a host-managed zoned device on this kernel. */
	dev_err(&vblk->vdev->dev,
		"virtio_blk: zoned devices are not supported");
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BLK_DEV_ZONED */
/* return id (s/n) string for *disk to *id_str
 *
 * Issues a synchronous VIRTIO_BLK_T_GET_ID into @id_str, which must be
 * at least VIRTIO_BLK_ID_BYTES long.  Returns 0 or a negative errno.
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	struct virtblk_req *vbr;
	int err;

	/* REQ_OP_DRV_IN: out header is filled in here, not by setup_cmd. */
	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	vbr = blk_mq_rq_to_pdu(req);
	vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
	vbr->out_hdr.sector = 0;

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
	blk_mq_free_request(req);
	return err;
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	/* vdev_mutex guards against virtblk_remove() clearing vblk->vdev. */
	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}
  732. static void virtblk_free_disk(struct gendisk *disk)
  733. {
  734. struct virtio_blk *vblk = disk->private_data;
  735. ida_free(&vd_index_ida, vblk->index);
  736. mutex_destroy(&vblk->vdev_mutex);
  737. kfree(vblk);
  738. }
/* Block device operations exposed to the block layer. */
static const struct block_device_operations virtblk_fops = {
	.owner  	= THIS_MODULE,
	.getgeo		= virtblk_getgeo,
	.free_disk	= virtblk_free_disk,
	.report_zones	= virtblk_report_zones,
};
/* Each disk owns 1 << PART_BITS minors; map a device index to its first one. */
static int index_to_minor(int index)
{
	return index << PART_BITS;
}
/* Inverse of index_to_minor(): recover the device index from a minor. */
static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
  753. static ssize_t serial_show(struct device *dev,
  754. struct device_attribute *attr, char *buf)
  755. {
  756. struct gendisk *disk = dev_to_disk(dev);
  757. int err;
  758. /* sysfs gives us a PAGE_SIZE buffer */
  759. BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
  760. buf[VIRTIO_BLK_ID_BYTES] = '\0';
  761. err = virtblk_get_id(disk, buf);
  762. if (!err)
  763. return strlen(buf);
  764. if (err == -EIO) /* Unsupported? Make it empty. */
  765. return 0;
  766. return err;
  767. }
  768. static DEVICE_ATTR_RO(serial);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* capacity is in 512-byte sectors; convert to logical blocks */
	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	/* render the size in both binary (KiB/MiB/...) and SI (kB/MB/...) units */
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	/* update the gendisk and emit a resize uevent if the size changed */
	set_capacity_and_notify(vblk->disk, capacity);
}
/* Process-context half of the config-changed notification: re-read capacity. */
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}
/*
 * Config-changed callback (may run in interrupt context); defer the
 * actual config-space reads to the driver workqueue.
 */
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}
/*
 * Discover and set up the request virtqueues.
 *
 * Honors the device's num_queues config (VIRTIO_BLK_F_MQ), capped by the
 * num_request_queues module parameter and the number of CPUs.  The last
 * poll_queues of them are configured without a callback and serviced by
 * virtblk_poll() instead of interrupts.
 *
 * On success vblk->vqs / vblk->num_vqs are populated; on failure all
 * allocations made here are released.  Returns 0 or a negative errno.
 */
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	unsigned short i;
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	unsigned short num_poll_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	/* no MQ feature: fall back to a single request queue */
	if (err)
		num_vqs = 1;

	/* a device advertising MQ must report at least one queue */
	if (!err && !num_vqs) {
		dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
		return -EINVAL;
	}

	num_vqs = min_t(unsigned int,
			min_not_zero(num_request_queues, nr_cpu_ids),
			num_vqs);

	/* keep at least one interrupt-driven queue */
	num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);

	vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
	vblk->io_queues[HCTX_TYPE_READ] = 0;
	vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
		 vblk->io_queues[HCTX_TYPE_DEFAULT],
		 vblk->io_queues[HCTX_TYPE_READ],
		 vblk->io_queues[HCTX_TYPE_POLL]);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	/* temporary arrays for virtio_find_vqs(); freed below in either case */
	vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs_info || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	/* interrupt-driven queues get the virtblk_done callback ... */
	for (i = 0; i < num_vqs - num_poll_vqs; i++) {
		vqs_info[i].callback = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
		vqs_info[i].name = vblk->vqs[i].name;
	}
	/* ... polled queues get no callback at all */
	for (; i < num_vqs; i++) {
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);
		vqs_info[i].name = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(vqs_info);
	if (err)
		kfree(vblk->vqs);
	return err;
}
  869. /*
  870. * Legacy naming scheme used for virtio devices. We are stuck with it for
  871. * virtio blk but don't ever use it for any new driver.
  872. */
  873. static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
  874. {
  875. const int base = 'z' - 'a' + 1;
  876. char *begin = buf + strlen(prefix);
  877. char *end = buf + buflen;
  878. char *p;
  879. int unit;
  880. p = end - 1;
  881. *p = '\0';
  882. unit = base;
  883. do {
  884. if (p == begin)
  885. return -EINVAL;
  886. *--p = 'a' + (index % unit);
  887. index = (index / unit) - 1;
  888. } while (index >= 0);
  889. memmove(begin, p, end - p);
  890. memcpy(buf, prefix, strlen(prefix));
  891. return 0;
  892. }
/*
 * Return the current write-cache mode: non-zero for writeback, 0 for
 * writethrough.  Reads the wce config field when VIRTIO_BLK_F_CONFIG_WCE
 * is negotiated, otherwise infers the mode from VIRTIO_BLK_F_FLUSH.
 */
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}
/* Index matches the wce config value: 0 = writethrough, 1 = writeback. */
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};
/*
 * sysfs "cache_type" store: switch the device between writethrough and
 * writeback.  Only visible as writable when VIRTIO_BLK_F_CONFIG_WCE is
 * negotiated (see virtblk_attrs_are_visible()).
 */
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	struct queue_limits lim;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	/* match against virtblk_cache_types[]; index doubles as wce value */
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	/* tell the device first ... */
	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);

	/* ... then mirror the (re-read) mode into the queue limits */
	lim = queue_limits_start_update(disk->queue);
	if (virtblk_get_cache_mode(vdev))
		lim.features |= BLK_FEAT_WRITE_CACHE;
	else
		lim.features &= ~BLK_FEAT_WRITE_CACHE;
	i = queue_limits_commit_update_frozen(disk->queue, &lim);
	if (i)
		return i;
	return count;
}
/* sysfs "cache_type" show: report the current cache mode as a string. */
static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	/* virtblk_get_cache_mode() only ever yields 0 or 1 */
	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}
static DEVICE_ATTR_RW(cache_type);
/* sysfs attributes attached to the disk device. */
static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};
  950. static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
  951. struct attribute *a, int n)
  952. {
  953. struct device *dev = kobj_to_dev(kobj);
  954. struct gendisk *disk = dev_to_disk(dev);
  955. struct virtio_blk *vblk = disk->private_data;
  956. struct virtio_device *vdev = vblk->vdev;
  957. if (a == &dev_attr_cache_type.attr &&
  958. !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
  959. return S_IRUGO;
  960. return a->mode;
  961. }
/* Attribute group handed to device_add_disk() in virtblk_probe(). */
static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};
/*
 * blk-mq ->map_queues: carve the hw queues into default/read/poll maps
 * according to the split computed in init_vq() (vblk->io_queues[]).
 */
static void virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;
	int i, qoff;

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = vblk->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(&set->map[i]);
		else
			blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
	}
}
/* Batched completion: unmap and clean up each request, then end them all. */
static void virtblk_complete_batch(struct io_comp_batch *iob)
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
		virtblk_cleanup_cmd(req);
	}
	blk_mq_end_request_batch(iob);
}
/*
 * blk-mq ->poll: reap completions from a poll virtqueue without interrupts.
 * Returns the number of completed requests found.
 */
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;
	int found = 0;

	spin_lock_irqsave(&vq->lock, flags);

	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
		struct request *req = blk_mq_rq_from_pdu(vbr);
		u8 status = virtblk_vbr_status(vbr);

		found++;
		/* try remote completion, then batching, else complete inline */
		if (!blk_mq_complete_request_remote(req) &&
		    !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
					 virtblk_complete_batch))
			virtblk_request_done(req);
	}

	/* freed-up descriptors may let stopped hw queues make progress */
	if (found)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

	spin_unlock_irqrestore(&vq->lock, flags);

	return found;
}
/* blk-mq operations for virtio-blk hardware queues. */
static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.queue_rqs	= virtio_queue_rqs,
	.commit_rqs	= virtio_commit_rqs,
	.complete	= virtblk_request_done,
	.map_queues	= virtblk_map_queues,
	.poll		= virtblk_poll,
};
/* Module parameter: hw queue depth override; 0 means size from the vring. */
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
/*
 * Populate the queue limits from the device's feature bits and config
 * space: segment limits, block size, topology, discard/write-zeroes/
 * secure-erase limits and (optionally) zoned parameters.
 * Returns 0 on success or a negative errno on invalid device config.
 */
static int virtblk_read_limits(struct virtio_blk *vblk,
			       struct queue_limits *lim)
{
	struct virtio_device *vdev = vblk->vdev;
	u32 v, max_size, sg_elems, opt_io_size;
	u32 max_discard_segs = 0;
	u32 discard_granularity = 0;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	size_t max_dma_size;
	int err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

	/* We can handle whatever the host told us to handle. */
	lim->max_segments = sg_elems;

	/* No real sector limit. */
	lim->max_hw_sectors = UINT_MAX;

	max_dma_size = virtio_max_dma_size(vdev);
	max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	lim->max_segment_size = max_size;

	/* Host can optionally specify the block size of the device */
	virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
			     struct virtio_blk_config, blk_size,
			     &lim->logical_block_size);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		lim->physical_block_size =
			lim->logical_block_size * (1 << physical_block_exp);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		lim->alignment_offset =
			lim->logical_block_size * alignment_offset;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		lim->io_min = lim->logical_block_size * min_io_size;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		lim->io_opt = lim->logical_block_size * opt_io_size;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &discard_granularity);

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		lim->max_hw_discard_sectors = v ? v : UINT_MAX;

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &max_discard_segs);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		lim->max_write_zeroes_sectors = v ? v : UINT_MAX;
	}

	/* The discard and secure erase limits are combined since the Linux
	 * block layer uses the same limit for both commands.
	 *
	 * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
	 * are negotiated, we will use the minimum between the limits.
	 *
	 * discard sector alignment is set to the minimum between discard_sector_alignment
	 * and secure_erase_sector_alignment.
	 *
	 * max discard sectors is set to the minimum between max_discard_seg and
	 * max_secure_erase_seg.
	 */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {

		virtio_cread(vdev, struct virtio_blk_config,
			     secure_erase_sector_alignment, &v);

		/* secure_erase_sector_alignment should not be zero, the device should set a
		 * valid number of sectors.
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: secure_erase_sector_alignment can't be 0\n");
			return -EINVAL;
		}

		discard_granularity = min_not_zero(discard_granularity, v);

		virtio_cread(vdev, struct virtio_blk_config,
			     max_secure_erase_sectors, &v);

		/* max_secure_erase_sectors should not be zero, the device should set a
		 * valid number of sectors.
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: max_secure_erase_sectors can't be 0\n");
			return -EINVAL;
		}

		lim->max_secure_erase_sectors = v;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_secure_erase_seg, &v);

		/* max_secure_erase_seg should not be zero, the device should set a
		 * valid number of segments
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: max_secure_erase_seg can't be 0\n");
			return -EINVAL;
		}

		max_discard_segs = min_not_zero(max_discard_segs, v);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
	    virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
		/* max_discard_seg and discard_granularity will be 0 only
		 * if max_discard_seg and discard_sector_alignment fields in the virtio
		 * config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated.
		 * In this case, we use default values.
		 */
		if (!max_discard_segs)
			max_discard_segs = sg_elems;

		lim->max_discard_segments =
			min(max_discard_segs, MAX_DISCARD_SEGMENTS);

		if (discard_granularity)
			lim->discard_granularity =
				discard_granularity << SECTOR_SHIFT;
		else
			lim->discard_granularity = lim->logical_block_size;
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
		u8 model;

		virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
		switch (model) {
		case VIRTIO_BLK_Z_NONE:
		case VIRTIO_BLK_Z_HA:
			/* treat host-aware devices as non-zoned */
			return 0;
		case VIRTIO_BLK_Z_HM:
			err = virtblk_read_zoned_limits(vblk, lim);
			if (err)
				return err;
			break;
		default:
			dev_err(&vdev->dev, "unsupported zone model %d\n", model);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Probe a virtio block device: allocate per-device state, set up the
 * virtqueues and blk-mq tag set, read the queue limits, create the
 * gendisk and register it.  Error paths unwind in strict reverse order
 * of setup.
 */
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct queue_limits lim = {
		.features = BLK_FEAT_ROTATIONAL,
		.logical_block_size = SECTOR_SIZE,
	};
	int err, index;
	unsigned int queue_depth;

	/* config space access is required; bail out early on buggy transports */
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* reserve a vdX name/minor-range slot */
	err = ida_alloc_range(&vd_index_ida, 0,
			      minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	/* inline scatterlist follows the per-request driver payload */
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
	vblk->tag_set.nr_maps = 1;
	/* with poll queues we need all three map types (default/read/poll) */
	if (vblk->io_queues[HCTX_TYPE_POLL])
		vblk->tag_set.nr_maps = 3;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_free_vq;

	err = virtblk_read_limits(vblk, &lim);
	if (err)
		goto out_free_tags;

	if (virtblk_get_cache_mode(vdev))
		lim.features |= BLK_FEAT_WRITE_CACHE;

	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
		goto out_free_tags;
	}

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->minors = 1 << PART_BITS;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->index = index;

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	/*
	 * All steps that follow use the VQs therefore they need to be
	 * placed after the virtio_device_ready() call above.
	 */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    (lim.features & BLK_FEAT_ZONED)) {
		err = blk_revalidate_disk_zones(vblk->disk);
		if (err)
			goto out_cleanup_disk;
	}

	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(vblk->disk);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_free(&vd_index_ida, index);
out:
	return err;
}
/*
 * Device removal: unregister the disk, then tear down the virtqueues.
 * vdev_mutex synchronizes against concurrent ioctl paths (e.g. getgeo)
 * that dereference vblk->vdev.
 */
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	/* may drop the last reference; vblk is freed in virtblk_free_disk() */
	put_disk(vblk->disk);
}
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: drain in-flight requests, reset the device and delete
 * the virtqueues.  The queue is left quiesced (but not frozen) so that
 * virtblk_restore() can unquiesce it after rebuilding the vqs.
 */
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	struct request_queue *q = vblk->disk->queue;

	/* Ensure no requests in virtqueues before deleting vqs. */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_unfreeze_queue(q);

	/* Ensure we don't receive any more interrupts */
	virtio_reset_device(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}
/*
 * System resume: rebuild the virtqueues deleted in virtblk_freeze(),
 * re-enable the device, then let queued requests flow again.
 */
static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	/* counterpart of the quiesce done in virtblk_freeze() */
	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif
/* Devices handled by this driver: any virtio block device. */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
  1347. static unsigned int features_legacy[] = {
  1348. VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
  1349. VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
  1350. VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
  1351. VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
  1352. VIRTIO_BLK_F_SECURE_ERASE,
  1353. }
  1354. ;
/* Feature bits for modern devices; adds ZONED over the legacy set. */
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
	VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
};
/* Virtio driver registration glue. */
static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};
/*
 * Module init: create the config-change workqueue, grab a block major
 * and register the virtio driver; unwind in reverse order on failure.
 */
static int __init virtio_blk_init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	/* 0 asks the block layer to pick a free major for us */
	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}
/* Module exit: tear down in reverse order of virtio_blk_init(). */
static void __exit virtio_blk_fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
  1404. module_init(virtio_blk_init);
  1405. module_exit(virtio_blk_fini);
  1406. MODULE_DEVICE_TABLE(virtio, id_table);
  1407. MODULE_DESCRIPTION("Virtio block driver");
  1408. MODULE_LICENSE("GPL");