blk-map.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

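/*
 * Bookkeeping for a copied (bounce-buffered) mapping, stored in
 * bio->bi_private so that bio_uncopy_user() can copy data back to the
 * original iterator and free the pages allocated here.
 */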
struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

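/*
 * Allocate a bio_map_data big enough to hold a private copy of the
 * iterator's iovec array, so the mapping does not depend on the caller's
 * (possibly on-stack, short-lived) iovecs staying around.
 */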
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;
	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	bmd->iter = *data;
	if (iter_is_iovec(data)) {
		memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);
		bmd->iter.__iov = bmd->iov;
	}
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}

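/*
 * Set up a bounce-buffered mapping: allocate pages (or borrow them from
 * @map_data), copy data in for writes, and attach a bio_map_data so the
 * copy-back and page freeing happen in bio_uncopy_user() at unmap time.
 */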
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if (iov_iter_rw(iter) == WRITE &&
	    (!map_data || !map_data->null_mapped)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else if (map_data && map_data->from_user) {
		struct iov_iter iter2 = *iter;

		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
		iter2.data_source = ITER_SOURCE;
		ret = bio_copy_from_iter(bio, &iter2);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

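/*
 * Free a bio obtained from blk_rq_map_bio_alloc(): bios allocated from
 * fs_bio_set with REQ_ALLOC_CACHE go back through bio_put(), kmalloc'ed
 * ones are uninitialized and freed directly.
 */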
static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}

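/*
 * Zero-copy path: extract (pin) the user pages described by @iter and add
 * them to a freshly allocated bio within the queue's hardware limits,
 * releasing everything again on error.
 */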
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	iov_iter_extraction_t extraction_flags = 0;
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		extraction_flags |= ITER_ALLOW_P2PDMA;
	if (iov_iter_extract_will_pin(iter))
		bio_set_flag(bio, BIO_PAGE_PINNED);

	while (iov_iter_count(iter)) {
		struct page *stack_pages[UIO_FASTIOV];
		struct page **pages = stack_pages;
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs > ARRAY_SIZE(stack_pages))
			pages = NULL;

		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
					       nr_vecs, extraction_flags, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page))
					break;

				if (same_page)
					bio_release_page(bio, page);
				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			bio_release_page(bio, pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}

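/*
 * For bios built on a vmalloc'ed buffer, invalidate the kernel vmap range
 * after a read so the CPU does not see stale cached data on architectures
 * that need explicit flushing (ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE).
 */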
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

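/*
 * Completion handler for the bounce-buffered read path of bio_copy_kern():
 * copy each bounce segment back into the original kernel buffer stashed in
 * bio->bi_private, then free the pages and the bio.
 */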
static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

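/*
 * Note for callers: on failure the bio is not consumed by
 * blk_rq_append_bio(), so the caller stays responsible for freeing it, as
 * the cleanup paths in this file do via blk_mq_map_bio_put() or
 * bio_uninit() + kfree().
 */
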
/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	const struct queue_limits *lim = &rq->q->limits;
	unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
	unsigned int nsegs;
	struct bio *bio;
	int ret;

	if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
		return -EINVAL;

	/* reuse the bvecs from the iterator instead of allocating new ones */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio_iov_bvec_set(bio, (struct iov_iter *)iter);

	/* check that the data layout matches the hardware restrictions */
	ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
	if (ret) {
		/* if we would have to split the bio, copy instead */
		if (ret > 0)
			ret = -EREMOTEIO;
		blk_mq_map_bio_put(bio);
		return ret;
	}

	blk_rq_bio_prep(rq, bio, nsegs);
	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!user_backed_iter(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

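/*
 * Convenience wrapper around blk_rq_map_user_iov() for a single contiguous
 * user buffer.
 */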
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iov_iter i;
	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

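/*
 * Map user data for SG_IO-style passthrough: if @vec is set, @ubuf points
 * to an iovec array of @iov_count entries, truncated to @buf_len per the
 * SG_IO howto; otherwise @ubuf is a plain buffer of @buf_len bytes.
 */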
int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		if (bio_integrity(bio))
			bio_integrity_unmap_user(bio);

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

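/*
 * Typical mapping lifecycle (illustrative sketch only, not part of this
 * file; assumes a passthrough request from blk_mq_alloc_request() and a
 * user buffer @ubuf of @len bytes, with error handling trimmed):
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (!ret) {
 *		struct bio *bio = rq->bio;   (saved: completion may change rq->bio)
 *		blk_execute_rq(rq, false);
 *		ret = blk_rq_unmap_user(bio);
 *	}
 *	blk_mq_free_request(rq);
 *
 * As the kernel-doc above notes, the original rq->bio must be saved before
 * execution and passed to blk_rq_unmap_user().
 */
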
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
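
/*
 * Illustrative sketch for the kernel-buffer path (not part of this file;
 * assumes @buf is a kmalloc()'ed buffer, since stack buffers are forced
 * through the copying path above, and omits error handling):
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *
 * No explicit unmap step is needed here: the bio's end_io handler
 * (bio_map_kern_endio() or bio_copy_kern_endio*()) releases everything at
 * completion.
 */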