  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* I/O iterator tests. This can only test kernel-backed iterator types.
  3. *
  4. * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells (dhowells@redhat.com)
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #include <linux/module.h>
  9. #include <linux/vmalloc.h>
  10. #include <linux/mm.h>
  11. #include <linux/uio.h>
  12. #include <linux/bvec.h>
  13. #include <linux/folio_queue.h>
  14. #include <kunit/test.h>
  15. MODULE_DESCRIPTION("iov_iter testing");
  16. MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
  17. MODULE_LICENSE("GPL");
struct kvec_test_range {
        int from, to;   /* [from, to) byte range; from == -1 terminates a table */
};

/*
 * Byte ranges used to carve the 1MiB test buffer into kvec segments.  The
 * set deliberately includes zero-length ranges ({2,2}, {0x20000,0x20000}),
 * page-crossing ranges and a range ending just past a page boundary
 * (0x27001) to exercise iterator edge cases.
 */
static const struct kvec_test_range kvec_test_ranges[] = {
        { 0x00002, 0x00002 },
        { 0x00027, 0x03000 },
        { 0x05193, 0x18794 },
        { 0x20000, 0x20000 },
        { 0x20000, 0x24000 },
        { 0x24000, 0x27001 },
        { 0x29000, 0xffffb },
        { 0xffffd, 0xffffe },
        { -1 }
};
  32. static inline u8 pattern(unsigned long x)
  33. {
  34. return x & 0xff;
  35. }
/* Deferred-action callback: unmap a buffer made by iov_kunit_create_buffer(). */
static void iov_kunit_unmap(void *data)
{
        vunmap(data);
}
/*
 * Allocate @npages pages, record them in *@ppages and return a contiguous
 * kernel mapping of them.  Cleanup is automatic at test end: the deferred
 * vunmap() also releases the pages thanks to VM_MAP_PUT_PAGES.
 */
static void *__init iov_kunit_create_buffer(struct kunit *test,
                                            struct page ***ppages,
                                            size_t npages)
{
        struct page **pages;
        unsigned long got;
        void *buffer;

        pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
        *ppages = pages;

        got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
        if (got != npages) {
                /* Partial allocation: hand the pages back before aborting. */
                release_pages(pages, got);
                KUNIT_ASSERT_EQ(test, got, npages);
        }

        /* Give each page a distinct ->index reflecting its position. */
        for (int i = 0; i < npages; i++)
                pages[i]->index = i;

        buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);

        kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
        return buffer;
}
/*
 * Fill in @kvec (at most @kvmax elements) from the range table @pr, each
 * entry addressing [from, to) within @buffer, then initialise *@iter over
 * the array for direction @dir.
 */
static void __init iov_kunit_load_kvec(struct kunit *test,
                                       struct iov_iter *iter, int dir,
                                       struct kvec *kvec, unsigned int kvmax,
                                       void *buffer, size_t bufsize,
                                       const struct kvec_test_range *pr)
{
        size_t size = 0;
        int i;

        for (i = 0; i < kvmax; i++, pr++) {
                if (pr->from < 0)
                        break;  /* End of the range table */
                KUNIT_ASSERT_GE(test, pr->to, pr->from);
                KUNIT_ASSERT_LE(test, pr->to, bufsize);
                kvec[i].iov_base = buffer + pr->from;
                kvec[i].iov_len = pr->to - pr->from;
                size += pr->to - pr->from;
        }
        KUNIT_ASSERT_LE(test, size, bufsize);

        iov_iter_kvec(iter, dir, kvec, i, size);
}
/*
 * Test copying to a ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_to_kvec(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct iov_iter iter;
        struct page **spages, **bpages;
        struct kvec kvec[8];
        u8 *scratch, *buffer;
        size_t bufsize, npages, size, copied;
        int i, patt;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        /* Source buffer holds the pattern; the destination starts zeroed. */
        scratch = iov_kunit_create_buffer(test, &spages, npages);
        for (i = 0; i < bufsize; i++)
                scratch[i] = pattern(i);

        buffer = iov_kunit_create_buffer(test, &bpages, npages);
        memset(buffer, 0, bufsize);

        iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
                            buffer, bufsize, kvec_test_ranges);
        size = iter.count;

        copied = copy_to_iter(scratch, size, &iter);

        /* The copy should consume the entire iterator. */
        KUNIT_EXPECT_EQ(test, copied, size);
        KUNIT_EXPECT_EQ(test, iter.count, 0);
        KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

        /* Build the expected image in the scratch buffer. */
        patt = 0;
        memset(scratch, 0, bufsize);
        for (pr = kvec_test_ranges; pr->from >= 0; pr++)
                for (i = pr->from; i < pr->to; i++)
                        scratch[i] = pattern(patt++);

        /* Compare the images */
        for (i = 0; i < bufsize; i++) {
                KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
                if (buffer[i] != scratch[i])
                        return; /* Stop at the first mismatch */
        }

        KUNIT_SUCCEED(test);
}
/*
 * Test copying from a ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_from_kvec(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct iov_iter iter;
        struct page **spages, **bpages;
        struct kvec kvec[8];
        u8 *scratch, *buffer;
        size_t bufsize, npages, size, copied;
        int i, j;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        /* Source buffer holds the pattern; the destination starts zeroed. */
        buffer = iov_kunit_create_buffer(test, &bpages, npages);
        for (i = 0; i < bufsize; i++)
                buffer[i] = pattern(i);

        scratch = iov_kunit_create_buffer(test, &spages, npages);
        memset(scratch, 0, bufsize);

        iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
                            buffer, bufsize, kvec_test_ranges);
        size = min(iter.count, bufsize);

        copied = copy_from_iter(scratch, size, &iter);

        /* The copy should consume the entire iterator. */
        KUNIT_EXPECT_EQ(test, copied, size);
        KUNIT_EXPECT_EQ(test, iter.count, 0);
        KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

        /*
         * Build the expected image in the main buffer: the ranges' pattern
         * bytes, packed contiguously from offset 0.
         */
        i = 0;
        memset(buffer, 0, bufsize);
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                for (j = pr->from; j < pr->to; j++) {
                        buffer[i++] = pattern(j);
                        if (i >= bufsize)
                                goto stop;
                }
        }
stop:

        /* Compare the images */
        for (i = 0; i < bufsize; i++) {
                KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
                if (scratch[i] != buffer[i])
                        return; /* Stop at the first mismatch */
        }

        KUNIT_SUCCEED(test);
}
struct bvec_test_range {
        int page, from, to;     /* Page index and [from, to) range within it */
};

/*
 * Ranges used to build a bio_vec segment list.  The consecutive whole-page
 * entries (pages 3-5) allow iov_kunit_load_bvec() to merge segments when
 * the pages happen to be physically contiguous.
 */
static const struct bvec_test_range bvec_test_ranges[] = {
        { 0, 0x0002, 0x0002 },
        { 1, 0x0027, 0x0893 },
        { 2, 0x0193, 0x0794 },
        { 3, 0x0000, 0x1000 },
        { 4, 0x0000, 0x1000 },
        { 5, 0x0000, 0x1000 },
        { 6, 0x0000, 0x0ffb },
        { 6, 0x0ffd, 0x0ffe },
        { -1, -1, -1 }
};
/*
 * Fill in @bvec (at most @bvmax elements) from the range table @pr and
 * initialise *@iter over the array.  A non-empty range beginning at offset
 * 0 of the page that physically follows the previous range's end is merged
 * into the preceding bio_vec rather than consuming a new element.
 */
static void __init iov_kunit_load_bvec(struct kunit *test,
                                       struct iov_iter *iter, int dir,
                                       struct bio_vec *bvec, unsigned int bvmax,
                                       struct page **pages, size_t npages,
                                       size_t bufsize,
                                       const struct bvec_test_range *pr)
{
        struct page *can_merge = NULL, *page;
        size_t size = 0;
        int i;

        for (i = 0; i < bvmax; i++, pr++) {
                if (pr->from < 0)
                        break;  /* End of the range table */
                KUNIT_ASSERT_LT(test, pr->page, npages);
                KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
                KUNIT_ASSERT_GE(test, pr->from, 0);
                KUNIT_ASSERT_GE(test, pr->to, pr->from);
                KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);

                page = pages[pr->page];
                if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
                        i--;    /* Extend the previous bvec instead */
                        bvec[i].bv_len += pr->to;
                } else {
                        bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
                }

                size += pr->to - pr->from;
                /* A range ending on a page boundary may merge with the next. */
                if ((pr->to & ~PAGE_MASK) == 0)
                        can_merge = page + pr->to / PAGE_SIZE;
                else
                        can_merge = NULL;
        }

        iov_iter_bvec(iter, dir, bvec, i, size);
}
  214. /*
  215. * Test copying to a ITER_BVEC-type iterator.
  216. */
  217. static void __init iov_kunit_copy_to_bvec(struct kunit *test)
  218. {
  219. const struct bvec_test_range *pr;
  220. struct iov_iter iter;
  221. struct bio_vec bvec[8];
  222. struct page **spages, **bpages;
  223. u8 *scratch, *buffer;
  224. size_t bufsize, npages, size, copied;
  225. int i, b, patt;
  226. bufsize = 0x100000;
  227. npages = bufsize / PAGE_SIZE;
  228. scratch = iov_kunit_create_buffer(test, &spages, npages);
  229. for (i = 0; i < bufsize; i++)
  230. scratch[i] = pattern(i);
  231. buffer = iov_kunit_create_buffer(test, &bpages, npages);
  232. memset(buffer, 0, bufsize);
  233. iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
  234. bpages, npages, bufsize, bvec_test_ranges);
  235. size = iter.count;
  236. copied = copy_to_iter(scratch, size, &iter);
  237. KUNIT_EXPECT_EQ(test, copied, size);
  238. KUNIT_EXPECT_EQ(test, iter.count, 0);
  239. KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
  240. /* Build the expected image in the scratch buffer. */
  241. b = 0;
  242. patt = 0;
  243. memset(scratch, 0, bufsize);
  244. for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
  245. u8 *p = scratch + pr->page * PAGE_SIZE;
  246. for (i = pr->from; i < pr->to; i++)
  247. p[i] = pattern(patt++);
  248. }
  249. /* Compare the images */
  250. for (i = 0; i < bufsize; i++) {
  251. KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
  252. if (buffer[i] != scratch[i])
  253. return;
  254. }
  255. KUNIT_SUCCEED(test);
  256. }
/*
 * Test copying from a ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_from_bvec(struct kunit *test)
{
        const struct bvec_test_range *pr;
        struct iov_iter iter;
        struct bio_vec bvec[8];
        struct page **spages, **bpages;
        u8 *scratch, *buffer;
        size_t bufsize, npages, size, copied;
        int i, j;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        /* Source buffer holds the pattern; the destination starts zeroed. */
        buffer = iov_kunit_create_buffer(test, &bpages, npages);
        for (i = 0; i < bufsize; i++)
                buffer[i] = pattern(i);

        scratch = iov_kunit_create_buffer(test, &spages, npages);
        memset(scratch, 0, bufsize);

        iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
                            bpages, npages, bufsize, bvec_test_ranges);
        size = iter.count;

        copied = copy_from_iter(scratch, size, &iter);

        /* The copy should consume the entire iterator. */
        KUNIT_EXPECT_EQ(test, copied, size);
        KUNIT_EXPECT_EQ(test, iter.count, 0);
        KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

        /*
         * Build the expected image in the main buffer: each range's pattern
         * bytes (offset by its page's position), packed contiguously.
         */
        i = 0;
        memset(buffer, 0, bufsize);
        for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
                size_t patt = pr->page * PAGE_SIZE;

                for (j = pr->from; j < pr->to; j++) {
                        buffer[i++] = pattern(patt + j);
                        if (i >= bufsize)
                                goto stop;
                }
        }
stop:

        /* Compare the images */
        for (i = 0; i < bufsize; i++) {
                KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
                if (scratch[i] != buffer[i])
                        return; /* Stop at the first mismatch */
        }

        KUNIT_SUCCEED(test);
}
/*
 * Deferred-action callback: free a folio_queue chain, putting every folio
 * still attached to it.
 */
static void iov_kunit_destroy_folioq(void *data)
{
        struct folio_queue *folioq, *next;

        for (folioq = data; folioq; folioq = next) {
                next = folioq->next;
                for (int i = 0; i < folioq_nr_slots(folioq); i++)
                        if (folioq_folio(folioq, i))
                                folio_put(folioq_folio(folioq, i));
                kfree(folioq);
        }
}
/*
 * Append @npages pages to @folioq, chaining on extra queue segments as
 * needed, then initialise *@iter over the whole queue.
 */
static void __init iov_kunit_load_folioq(struct kunit *test,
                                         struct iov_iter *iter, int dir,
                                         struct folio_queue *folioq,
                                         struct page **pages, size_t npages)
{
        struct folio_queue *p = folioq;
        size_t size = 0;
        int i;

        for (i = 0; i < npages; i++) {
                if (folioq_full(p)) {
                        /* Chain on another segment when this one fills up. */
                        p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
                        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
                        folioq_init(p->next);
                        p->next->prev = p;
                        p = p->next;
                }
                folioq_append(p, page_folio(pages[i]));
                size += PAGE_SIZE;
        }
        iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
}
/* Allocate a folio_queue head, torn down automatically at test end. */
static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
{
        struct folio_queue *folioq;

        folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
        kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
        folioq_init(folioq);
        return folioq;
}
/*
 * Test copying to a ITER_FOLIOQ-type iterator.
 */
static void __init iov_kunit_copy_to_folioq(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct iov_iter iter;
        struct folio_queue *folioq;
        struct page **spages, **bpages;
        u8 *scratch, *buffer;
        size_t bufsize, npages, size, copied;
        int i, patt;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        folioq = iov_kunit_create_folioq(test);

        /* Source buffer holds the pattern; the destination starts zeroed. */
        scratch = iov_kunit_create_buffer(test, &spages, npages);
        for (i = 0; i < bufsize; i++)
                scratch[i] = pattern(i);

        buffer = iov_kunit_create_buffer(test, &bpages, npages);
        memset(buffer, 0, bufsize);

        iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

        /* Copy the next pattern slice into each range in turn. */
        i = 0;
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                size = pr->to - pr->from;
                KUNIT_ASSERT_LE(test, pr->to, bufsize);

                /* Rebuild the iterator capped at pr->to, then skip to pr->from. */
                iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
                iov_iter_advance(&iter, pr->from);
                copied = copy_to_iter(scratch + i, size, &iter);

                KUNIT_EXPECT_EQ(test, copied, size);
                KUNIT_EXPECT_EQ(test, iter.count, 0);
                KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
                i += size;
                if (test->status == KUNIT_FAILURE)
                        goto stop;
        }

        /* Build the expected image in the scratch buffer. */
        patt = 0;
        memset(scratch, 0, bufsize);
        for (pr = kvec_test_ranges; pr->from >= 0; pr++)
                for (i = pr->from; i < pr->to; i++)
                        scratch[i] = pattern(patt++);

        /* Compare the images */
        for (i = 0; i < bufsize; i++) {
                KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
                if (buffer[i] != scratch[i])
                        return; /* Stop at the first mismatch */
        }

stop:
        KUNIT_SUCCEED(test);
}
/*
 * Test copying from a ITER_FOLIOQ-type iterator.
 */
static void __init iov_kunit_copy_from_folioq(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct iov_iter iter;
        struct folio_queue *folioq;
        struct page **spages, **bpages;
        u8 *scratch, *buffer;
        size_t bufsize, npages, size, copied;
        int i, j;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        folioq = iov_kunit_create_folioq(test);

        /* Source buffer holds the pattern; the destination starts zeroed. */
        buffer = iov_kunit_create_buffer(test, &bpages, npages);
        for (i = 0; i < bufsize; i++)
                buffer[i] = pattern(i);

        scratch = iov_kunit_create_buffer(test, &spages, npages);
        memset(scratch, 0, bufsize);

        iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

        /* Copy each range in turn into the next slice of scratch. */
        i = 0;
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                size = pr->to - pr->from;
                KUNIT_ASSERT_LE(test, pr->to, bufsize);

                /* Rebuild the iterator capped at pr->to, then skip to pr->from. */
                iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
                iov_iter_advance(&iter, pr->from);
                copied = copy_from_iter(scratch + i, size, &iter);

                KUNIT_EXPECT_EQ(test, copied, size);
                KUNIT_EXPECT_EQ(test, iter.count, 0);
                KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
                i += size;
        }

        /*
         * Build the expected image in the main buffer: the ranges' pattern
         * bytes, packed contiguously from offset 0.
         */
        i = 0;
        memset(buffer, 0, bufsize);
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                for (j = pr->from; j < pr->to; j++) {
                        buffer[i++] = pattern(j);
                        if (i >= bufsize)
                                goto stop;
                }
        }
stop:

        /* Compare the images */
        for (i = 0; i < bufsize; i++) {
                KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
                if (scratch[i] != buffer[i])
                        return; /* Stop at the first mismatch */
        }

        KUNIT_SUCCEED(test);
}
/* Deferred-action callback: tear down and free a test xarray. */
static void iov_kunit_destroy_xarray(void *data)
{
        struct xarray *xarray = data;

        xa_destroy(xarray);
        kfree(xarray);
}
/*
 * Store @npages pages into @xarray at indices 0..npages-1 and initialise
 * *@iter over the whole span.
 */
static void __init iov_kunit_load_xarray(struct kunit *test,
                                         struct iov_iter *iter, int dir,
                                         struct xarray *xarray,
                                         struct page **pages, size_t npages)
{
        size_t size = 0;
        int i;

        for (i = 0; i < npages; i++) {
                void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);

                KUNIT_ASSERT_FALSE(test, xa_is_err(x));
                size += PAGE_SIZE;
        }
        iov_iter_xarray(iter, dir, xarray, 0, size);
}
  466. static struct xarray *iov_kunit_create_xarray(struct kunit *test)
  467. {
  468. struct xarray *xarray;
  469. xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
  470. xa_init(xarray);
  471. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
  472. kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
  473. return xarray;
  474. }
/*
 * Test copying to a ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_to_xarray(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct iov_iter iter;
        struct xarray *xarray;
        struct page **spages, **bpages;
        u8 *scratch, *buffer;
        size_t bufsize, npages, size, copied;
        int i, patt;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        xarray = iov_kunit_create_xarray(test);

        /* Source buffer holds the pattern; the destination starts zeroed. */
        scratch = iov_kunit_create_buffer(test, &spages, npages);
        for (i = 0; i < bufsize; i++)
                scratch[i] = pattern(i);

        buffer = iov_kunit_create_buffer(test, &bpages, npages);
        memset(buffer, 0, bufsize);

        iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

        /* Copy the next pattern slice into each range in turn. */
        i = 0;
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                size = pr->to - pr->from;
                KUNIT_ASSERT_LE(test, pr->to, bufsize);

                /* Rebuild the iterator starting at the range's offset. */
                iov_iter_xarray(&iter, READ, xarray, pr->from, size);
                copied = copy_to_iter(scratch + i, size, &iter);

                KUNIT_EXPECT_EQ(test, copied, size);
                KUNIT_EXPECT_EQ(test, iter.count, 0);
                KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
                i += size;
        }

        /* Build the expected image in the scratch buffer. */
        patt = 0;
        memset(scratch, 0, bufsize);
        for (pr = kvec_test_ranges; pr->from >= 0; pr++)
                for (i = pr->from; i < pr->to; i++)
                        scratch[i] = pattern(patt++);

        /* Compare the images */
        for (i = 0; i < bufsize; i++) {
                KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
                if (buffer[i] != scratch[i])
                        return; /* Stop at the first mismatch */
        }

        KUNIT_SUCCEED(test);
}
/*
 * Test copying from a ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_from_xarray(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct iov_iter iter;
        struct xarray *xarray;
        struct page **spages, **bpages;
        u8 *scratch, *buffer;
        size_t bufsize, npages, size, copied;
        int i, j;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        xarray = iov_kunit_create_xarray(test);

        /* Source buffer holds the pattern; the destination starts zeroed. */
        buffer = iov_kunit_create_buffer(test, &bpages, npages);
        for (i = 0; i < bufsize; i++)
                buffer[i] = pattern(i);

        scratch = iov_kunit_create_buffer(test, &spages, npages);
        memset(scratch, 0, bufsize);

        iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

        /* Copy each range in turn into the next slice of scratch. */
        i = 0;
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                size = pr->to - pr->from;
                KUNIT_ASSERT_LE(test, pr->to, bufsize);

                /* Rebuild the iterator starting at the range's offset. */
                iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
                copied = copy_from_iter(scratch + i, size, &iter);

                KUNIT_EXPECT_EQ(test, copied, size);
                KUNIT_EXPECT_EQ(test, iter.count, 0);
                KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
                i += size;
        }

        /*
         * Build the expected image in the main buffer: the ranges' pattern
         * bytes, packed contiguously from offset 0.
         */
        i = 0;
        memset(buffer, 0, bufsize);
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                for (j = pr->from; j < pr->to; j++) {
                        buffer[i++] = pattern(j);
                        if (i >= bufsize)
                                goto stop;
                }
        }
stop:

        /* Compare the images */
        for (i = 0; i < bufsize; i++) {
                KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
                if (scratch[i] != buffer[i])
                        return; /* Stop at the first mismatch */
        }

        KUNIT_SUCCEED(test);
}
/*
 * Test the extraction of ITER_KVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct iov_iter iter;
        struct page **bpages, *pagelist[8], **pages = pagelist;
        struct kvec kvec[8];
        u8 *buffer;
        ssize_t len;
        size_t bufsize, size = 0, npages;
        int i, from;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        buffer = iov_kunit_create_buffer(test, &bpages, npages);
        iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
                            buffer, bufsize, kvec_test_ranges);
        size = iter.count;

        pr = kvec_test_ranges;
        from = pr->from;
        do {
                size_t offset0 = LONG_MAX;

                /* Poison the page list so unfilled slots are detectable. */
                for (i = 0; i < ARRAY_SIZE(pagelist); i++)
                        pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

                len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
                                             ARRAY_SIZE(pagelist), 0, &offset0);
                KUNIT_EXPECT_GE(test, len, 0);
                if (len < 0)
                        break;
                KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
                KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
                KUNIT_EXPECT_LE(test, len, size);
                KUNIT_EXPECT_EQ(test, iter.count, size - len);
                size -= len;
                if (len == 0)
                        break;

                /* Check each extracted page against the expected range table. */
                for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
                        struct page *p;
                        ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
                        int ix;

                        KUNIT_ASSERT_GE(test, part, 0);
                        while (from == pr->to) {
                                /* Advance to the next non-exhausted range. */
                                pr++;
                                from = pr->from;
                                if (from < 0)
                                        goto stop;
                        }
                        ix = from / PAGE_SIZE;
                        KUNIT_ASSERT_LT(test, ix, npages);
                        p = bpages[ix];
                        KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
                        KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
                        from += part;
                        len -= part;
                        KUNIT_ASSERT_GE(test, len, 0);
                        if (len == 0)
                                break;
                        offset0 = 0;    /* Subsequent pages start at offset 0 */
                }

                if (test->status == KUNIT_FAILURE)
                        break;
        } while (iov_iter_count(&iter) > 0);

stop:
        KUNIT_EXPECT_EQ(test, size, 0);
        KUNIT_EXPECT_EQ(test, iter.count, 0);
        KUNIT_SUCCEED(test);
}
/*
 * Test the extraction of ITER_BVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
{
        const struct bvec_test_range *pr;
        struct iov_iter iter;
        struct page **bpages, *pagelist[8], **pages = pagelist;
        struct bio_vec bvec[8];
        ssize_t len;
        size_t bufsize, size = 0, npages;
        int i, from;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        iov_kunit_create_buffer(test, &bpages, npages);
        iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
                            bpages, npages, bufsize, bvec_test_ranges);
        size = iter.count;

        pr = bvec_test_ranges;
        from = pr->from;
        do {
                size_t offset0 = LONG_MAX;

                /* Poison the page list so unfilled slots are detectable. */
                for (i = 0; i < ARRAY_SIZE(pagelist); i++)
                        pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

                len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
                                             ARRAY_SIZE(pagelist), 0, &offset0);
                KUNIT_EXPECT_GE(test, len, 0);
                if (len < 0)
                        break;
                KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
                KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
                KUNIT_EXPECT_LE(test, len, size);
                KUNIT_EXPECT_EQ(test, iter.count, size - len);
                size -= len;
                if (len == 0)
                        break;

                /* Check each extracted page against the expected range table. */
                for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
                        struct page *p;
                        ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
                        int ix;

                        KUNIT_ASSERT_GE(test, part, 0);
                        while (from == pr->to) {
                                /* Advance to the next non-exhausted range. */
                                pr++;
                                from = pr->from;
                                if (from < 0)
                                        goto stop;
                        }
                        ix = pr->page + from / PAGE_SIZE;
                        KUNIT_ASSERT_LT(test, ix, npages);
                        p = bpages[ix];
                        KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
                        KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
                        from += part;
                        len -= part;
                        KUNIT_ASSERT_GE(test, len, 0);
                        if (len == 0)
                                break;
                        offset0 = 0;    /* Subsequent pages start at offset 0 */
                }

                if (test->status == KUNIT_FAILURE)
                        break;
        } while (iov_iter_count(&iter) > 0);

stop:
        KUNIT_EXPECT_EQ(test, size, 0);
        KUNIT_EXPECT_EQ(test, iter.count, 0);
        KUNIT_SUCCEED(test);
}
/*
 * Test the extraction of ITER_FOLIOQ-type iterators.
 */
static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct folio_queue *folioq;
        struct iov_iter iter;
        struct page **bpages, *pagelist[8], **pages = pagelist;
        ssize_t len;
        size_t bufsize, size = 0, npages;
        int i, from;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        folioq = iov_kunit_create_folioq(test);

        iov_kunit_create_buffer(test, &bpages, npages);
        iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

        /* Extract each range in turn and check the returned pages. */
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                from = pr->from;
                size = pr->to - from;
                KUNIT_ASSERT_LE(test, pr->to, bufsize);

                /* Rebuild the iterator capped at pr->to, then skip to pr->from. */
                iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
                iov_iter_advance(&iter, from);

                do {
                        size_t offset0 = LONG_MAX;

                        /* Poison the page list so unfilled slots are detectable. */
                        for (i = 0; i < ARRAY_SIZE(pagelist); i++)
                                pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

                        len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
                                                     ARRAY_SIZE(pagelist), 0, &offset0);
                        KUNIT_EXPECT_GE(test, len, 0);
                        if (len < 0)
                                break;
                        KUNIT_EXPECT_LE(test, len, size);
                        KUNIT_EXPECT_EQ(test, iter.count, size - len);
                        if (len == 0)
                                break;
                        size -= len;
                        KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
                        KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

                        /* Check each extracted page against its position. */
                        for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
                                struct page *p;
                                ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
                                int ix;

                                KUNIT_ASSERT_GE(test, part, 0);
                                ix = from / PAGE_SIZE;
                                KUNIT_ASSERT_LT(test, ix, npages);
                                p = bpages[ix];
                                KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
                                KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
                                from += part;
                                len -= part;
                                KUNIT_ASSERT_GE(test, len, 0);
                                if (len == 0)
                                        break;
                                offset0 = 0;    /* Subsequent pages start at 0 */
                        }

                        if (test->status == KUNIT_FAILURE)
                                goto stop;
                } while (iov_iter_count(&iter) > 0);

                KUNIT_EXPECT_EQ(test, size, 0);
                KUNIT_EXPECT_EQ(test, iter.count, 0);
        }

stop:
        KUNIT_SUCCEED(test);
}
/*
 * Test the extraction of ITER_XARRAY-type iterators.
 */
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
{
        const struct kvec_test_range *pr;
        struct iov_iter iter;
        struct xarray *xarray;
        struct page **bpages, *pagelist[8], **pages = pagelist;
        ssize_t len;
        size_t bufsize, size = 0, npages;
        int i, from;

        bufsize = 0x100000;
        npages = bufsize / PAGE_SIZE;

        xarray = iov_kunit_create_xarray(test);

        iov_kunit_create_buffer(test, &bpages, npages);
        iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

        /* Extract each range in turn and check the returned pages. */
        for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
                from = pr->from;
                size = pr->to - from;
                KUNIT_ASSERT_LE(test, pr->to, bufsize);

                /* Rebuild the iterator starting at the range's offset. */
                iov_iter_xarray(&iter, WRITE, xarray, from, size);

                do {
                        size_t offset0 = LONG_MAX;

                        /* Poison the page list so unfilled slots are detectable. */
                        for (i = 0; i < ARRAY_SIZE(pagelist); i++)
                                pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

                        len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
                                                     ARRAY_SIZE(pagelist), 0, &offset0);
                        KUNIT_EXPECT_GE(test, len, 0);
                        if (len < 0)
                                break;
                        KUNIT_EXPECT_LE(test, len, size);
                        KUNIT_EXPECT_EQ(test, iter.count, size - len);
                        if (len == 0)
                                break;
                        size -= len;
                        KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
                        KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

                        /* Check each extracted page against its position. */
                        for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
                                struct page *p;
                                ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
                                int ix;

                                KUNIT_ASSERT_GE(test, part, 0);
                                ix = from / PAGE_SIZE;
                                KUNIT_ASSERT_LT(test, ix, npages);
                                p = bpages[ix];
                                KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
                                KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
                                from += part;
                                len -= part;
                                KUNIT_ASSERT_GE(test, len, 0);
                                if (len == 0)
                                        break;
                                offset0 = 0;    /* Subsequent pages start at 0 */
                        }

                        if (test->status == KUNIT_FAILURE)
                                goto stop;
                } while (iov_iter_count(&iter) > 0);

                KUNIT_EXPECT_EQ(test, size, 0);
                KUNIT_EXPECT_EQ(test, iter.count, 0);
                KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
        }

stop:
        KUNIT_SUCCEED(test);
}
/* The list of test cases making up the iov_iter suite. */
static struct kunit_case __refdata iov_kunit_cases[] = {
        KUNIT_CASE(iov_kunit_copy_to_kvec),
        KUNIT_CASE(iov_kunit_copy_from_kvec),
        KUNIT_CASE(iov_kunit_copy_to_bvec),
        KUNIT_CASE(iov_kunit_copy_from_bvec),
        KUNIT_CASE(iov_kunit_copy_to_folioq),
        KUNIT_CASE(iov_kunit_copy_from_folioq),
        KUNIT_CASE(iov_kunit_copy_to_xarray),
        KUNIT_CASE(iov_kunit_copy_from_xarray),
        KUNIT_CASE(iov_kunit_extract_pages_kvec),
        KUNIT_CASE(iov_kunit_extract_pages_bvec),
        KUNIT_CASE(iov_kunit_extract_pages_folioq),
        KUNIT_CASE(iov_kunit_extract_pages_xarray),
        {}
};
/* Suite definition and registration with the KUnit framework. */
static struct kunit_suite iov_kunit_suite = {
        .name = "iov_iter",
        .test_cases = iov_kunit_cases,
};

kunit_test_suites(&iov_kunit_suite);