blk-merge.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

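/*
 * Return the last bio_vec covered by @bio's iterator in @bv. If @bio ends in
 * the middle of a bvec, the returned length is clamped to the part the bio
 * actually covers (iter.bi_bvec_done).
 */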
static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

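/*
 * Check whether appending @next after @prev would create a segment gap that
 * violates the queue's virt_boundary limit. @prev_rq, if non-NULL, is the
 * request @prev belongs to and is used to look at its very first bvec.
 */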
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', meanwhile it has to be aligned to the
 * logical block size, which is the minimum accepted unit by hardware.
 */
static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
{
	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}

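/*
 * Split off the first @split_sectors sectors of @bio and return the new bio;
 * the remainder is chained and resubmitted. A @split_sectors of zero means no
 * split is needed and @bio is returned as-is. A negative value is treated as
 * an errno: @bio is ended with the matching blk_status and NULL is returned.
 */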
static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
{
	if (unlikely(split_sectors < 0)) {
		bio->bi_status = errno_to_blk_status(split_sectors);
		bio_endio(bio);
		return NULL;
	}

	if (split_sectors) {
		struct bio *split;

		split = bio_split(bio, split_sectors, GFP_NOIO,
				&bio->bi_bdev->bd_disk->bio_split);
		split->bi_opf |= REQ_NOMERGE;
		blkcg_bio_issue_init(split);
		bio_chain(split, bio);
		trace_block_split(split, bio->bi_iter.bi_sector);
		WARN_ON_ONCE(bio_zone_write_plugging(bio));
		submit_bio_noacct(bio);
		return split;
	}

	return bio;
}

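/*
 * Split a discard/secure-erase bio so that it does not exceed the queue's
 * max_discard_sectors limit, trimming the split point so that the remainder
 * starts on a discard_granularity aligned sector.
 */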
struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	granularity = max(lim->discard_granularity >> 9, 1U);

	max_discard_sectors =
		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors))
		return bio;

	if (bio_sectors(bio) <= max_discard_sectors)
		return bio;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	tmp = bio->bi_iter.bi_sector + split_sectors -
		((lim->discard_alignment >> 9) % granularity);
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_submit_split(bio, split_sectors);
}

static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
						bool is_atomic)
{
	/*
	 * chunk_sectors must be a multiple of atomic_write_boundary_sectors if
	 * both non-zero.
	 */
	if (is_atomic && lim->atomic_write_boundary_sectors)
		return lim->atomic_write_boundary_sectors;

	return lim->chunk_sectors;
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct bio *bio,
				       const struct queue_limits *lim)
{
	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
	bool is_atomic = bio->bi_opf & REQ_ATOMIC;
	unsigned boundary_sectors = blk_boundary_sectors(lim, is_atomic);
	unsigned max_sectors, start, end;

	/*
	 * We ignore lim->max_sectors for atomic writes because it may be less
	 * than the actual bio size, which we cannot tolerate.
	 */
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
		max_sectors = lim->max_write_zeroes_sectors;
	else if (is_atomic)
		max_sectors = lim->atomic_write_max_sectors;
	else
		max_sectors = lim->max_sectors;

	if (boundary_sectors) {
		max_sectors = min(max_sectors,
			blk_boundary_sectors_left(bio->bi_iter.bi_sector,
					boundary_sectors));
	}

	start = bio->bi_iter.bi_sector & (pbs - 1);
	end = (start + max_sectors) & ~(pbs - 1);
	if (end > start)
		return end - start;
	return max_sectors & ~(lbs - 1);
}

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @paddr: address of the range to add
 * @len: maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @lim:      [in] queue limits to split based on
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 *            by the number of bytes from @bv that may be appended to that
 *            bio without exceeding @max_bytes
 * @max_segs: [in] upper bound for *@nsegs
 * @max_bytes: [in] upper bound for *@bytes
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct queue_limits *lim,
		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
		unsigned max_segs, unsigned max_bytes)
{
	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
			break;
	}

	*bytes += total_len;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

static unsigned int bio_split_alignment(struct bio *bio,
		const struct queue_limits *lim)
{
	if (op_is_write(bio_op(bio)) && lim->zone_write_granularity)
		return lim->zone_write_granularity;
	return lim->logical_block_size;
}

/**
 * bio_split_rw_at - check if and where to split a read/write bio
 * @bio:  [in] bio to be split
 * @lim:  [in] queue limits to split based on
 * @segs: [out] number of segments in the bio with the first half of the sectors
 * @max_bytes: [in] maximum number of bytes per bio
 *
 * Find out if @bio needs to be split to fit the queue limits in @lim and a
 * maximum size of @max_bytes.  Returns a negative error number if @bio can't
 * be split, 0 if the bio doesn't have to be split, or a positive sector offset
 * if @bio needs to be split.
 */
int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, unsigned max_bytes)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, bytes = 0;

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < lim->max_segments &&
		    bytes + bv.bv_len <= max_bytes &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			bytes += bv.bv_len;
		} else {
			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
					lim->max_segments, max_bytes))
				goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return 0;
split:
	if (bio->bi_opf & REQ_ATOMIC)
		return -EINVAL;

	/*
	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
	 * with EAGAIN if splitting is required and return an error.
	 */
	if (bio->bi_opf & REQ_NOWAIT)
		return -EAGAIN;

	*segs = nsegs;

	/*
	 * Individual bvecs might not be logical block aligned. Round down the
	 * split size so that each bio is properly block size aligned, even if
	 * we do not use the full hardware limits.
	 */
	bytes = ALIGN_DOWN(bytes, bio_split_alignment(bio, lim));

	/*
	 * Bio splitting may cause subtle trouble such as hang when doing sync
	 * iopoll in direct IO routine. Given the performance gain of iopoll
	 * for big IO can be trivial, disable iopoll when split needed.
	 */
	bio_clear_polled(bio);
	return bytes >> SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(bio_split_rw_at);

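/*
 * Split a read/write bio against the queue limits in @lim, using the maximum
 * I/O size computed by get_max_io_size(), and submit the remainder.
 */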
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs)
{
	return bio_submit_split(bio,
		bio_split_rw_at(bio, lim, nr_segs,
			get_max_io_size(bio, lim) << SECTOR_SHIFT));
}

/*
 * REQ_OP_ZONE_APPEND bios must never be split by the block layer.
 *
 * But we want the nr_segs calculation provided by bio_split_rw_at, and having
 * a good sanity check that the submitter built the bio correctly is nice to
 * have as well.
 */
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs)
{
	unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim);
	int split_sectors;

	split_sectors = bio_split_rw_at(bio, lim, nr_segs,
			max_sectors << SECTOR_SHIFT);
	if (WARN_ON_ONCE(split_sectors > 0))
		split_sectors = -EINVAL;
	return bio_submit_split(bio, split_sectors);
}

struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs)
{
	unsigned int max_sectors = get_max_io_size(bio, lim);

	*nsegs = 0;

	/*
	 * An unset limit should normally not happen, as bio submission is keyed
	 * off having a non-zero limit. But SCSI can clear the limit in the
	 * I/O completion handler, and we can race and see this. Splitting to a
	 * zero limit obviously doesn't make sense, so band-aid it here.
	 */
	if (!max_sectors)
		return bio;
	if (bio_sectors(bio) <= max_sectors)
		return bio;
	return bio_submit_split(bio, max_sectors);
}

/**
 * bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 *
 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 * if so split off a bio fitting the limits from the beginning of @bio and
 * return it.  @bio is shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
struct bio *bio_split_to_limits(struct bio *bio)
{
	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
	unsigned int nr_segs;

	return __bio_split_to_limits(bio, lim, &nr_segs);
}
EXPORT_SYMBOL(bio_split_to_limits);

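/*
 * Recalculate and return the number of physical segments in @rq by walking
 * its bvecs against the queue limits. Discard and secure erase requests are
 * counted per bio when the queue supports multi-segment discards, and
 * write-zeroes requests carry no payload segments at all.
 */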
unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int bytes = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = get_max_segment_size(&q->limits,
				bvec_phys(bvec) + total, nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

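/* Map a single bvec that fits in one segment to the next scatterlist entry. */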
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

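/*
 * Walk all bios of a request and map their bvecs to scatterlist entries,
 * merging physically contiguous bvecs from adjacent bios where the segment
 * size limit allows. Returns the number of scatterlist entries used.
 */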
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);

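/*
 * Return the maximum number of sectors @rq may grow to when merging at
 * @offset, honouring chunk/atomic-write boundaries for everything except
 * passthrough, discard and secure erase requests.
 */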
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;
	struct queue_limits *lim = &q->limits;
	unsigned int max_sectors, boundary_sectors;
	bool is_atomic = rq->cmd_flags & REQ_ATOMIC;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	boundary_sectors = blk_boundary_sectors(lim, is_atomic);
	max_sectors = blk_queue_get_max_sectors(rq);

	if (!boundary_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return max_sectors;
	return min(max_sectors,
		   blk_boundary_sectors_left(offset, boundary_sectors));
}

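/*
 * Account the physical segments @bio adds to @req once a merge has been
 * deemed possible, refusing the merge if cgroup, integrity or segment-count
 * limits would be violated. Returns 1 on success, 0 if the merge is refused.
 */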
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	if (bio_integrity(bio))
		req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
									bio);

	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

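/*
 * Check the low-level limits for merging @bio at the back of @req: segment
 * gaps, integrity gaps, crypto contexts and the maximum request size.
 * Returns 1 if the merge is allowed, 0 otherwise.
 */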
int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	req->nr_integrity_segments += next->nr_integrity_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
static void blk_rq_set_mixed_merge(struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static inline blk_opf_t bio_failfast(const struct bio *bio)
{
	if (bio->bi_opf & REQ_RAHEAD)
		return REQ_FAILFAST_MASK;

	return bio->bi_opf & REQ_FAILFAST_MASK;
}

/*
 * After we are marked as MIXED_MERGE, any new RA bio has to be updated
 * as failfast, and request's failfast has to be updated in case of
 * front merge.
 */
static inline void blk_update_mixed_merge(struct request *req,
		struct bio *bio, bool front_merge)
{
	if (req->rq_flags & RQF_MIXED_MERGE) {
		if (bio->bi_opf & REQ_RAHEAD)
			bio->bi_opf |= REQ_FAILFAST_MASK;

		if (front_merge) {
			req->cmd_flags &= ~REQ_FAILFAST_MASK;
			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
		}
	}
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_local_dec(req->part,
				    in_flight[op_is_write(req_op(req))]);
		part_stat_unlock();
	}
}

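/*
 * Classify how @next could be merged into @req: as a discard merge if @req is
 * a mergeable discard, as a back merge if @next starts right where @req ends,
 * or not at all.
 */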
static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

static bool blk_atomic_write_mergeable_rq_bio(struct request *rq,
					      struct bio *bio)
{
	return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
}

static bool blk_atomic_write_mergeable_rqs(struct request *rq,
					   struct request *next)
{
	return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

	/* Don't merge requests with different write hints. */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	if (!blk_atomic_write_mergeable_rqs(req, next))
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	blk_crypto_rq_put_keyslot(next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

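/*
 * Try to merge @rq with the request the elevator placed after it (back merge)
 * or before it (front merge). Returns the request that was absorbed and must
 * be freed by the caller, or NULL if no merge happened.
 */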
static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* Don't merge requests with different write hints. */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
		return false;

	return true;
}

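/*
 * Decide which kind of merge is possible between @rq and @bio based on their
 * positions: discard, back, front, or none.
 */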
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, false);

	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_bio_merged(bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	/*
	 * A front merge for writes to sequential zones of a zoned block device
	 * can happen only if the user submitted writes out of order. Do not
	 * merge such writes to let them fail.
	 */
	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		return BIO_MERGE_FAILED;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, true);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

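/*
 * Try to merge @bio into @rq as a back, front or discard merge. When
 * @sched_allow_merge is set, the I/O scheduler is asked for permission first.
 * Returns BIO_MERGE_OK, BIO_MERGE_FAILED, or BIO_MERGE_NONE if the two don't
 * match at all.
 */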
static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 *
 * Determine whether @bio being queued on @q can be merged with the previous
 * request on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug || rq_list_empty(plug->mq_list))
		return false;

	rq_list_for_each(&plug->mq_list, rq) {
		if (rq->q == q) {
			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
			    BIO_MERGE_OK)
				return true;
			break;
		}

		/*
		 * Only keep iterating plug list for merges if we have multiple
		 * queues
		 */
		if (!plug->multiple_queues)
			break;
	}
	return false;
}

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

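/*
 * Ask the elevator for a merge candidate for @bio and attempt the merge. If
 * the bio merge succeeds, additionally try to merge the request with a
 * neighbouring request; a request absorbed that way is returned through
 * @merged_request for the caller to free. Returns %true if @bio was merged.
 */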
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
  1056. EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);