extent-map-tests.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2017 Oracle. All rights reserved.
  4. */
  5. #include <linux/types.h>
  6. #include "btrfs-tests.h"
  7. #include "../ctree.h"
  8. #include "../btrfs_inode.h"
  9. #include "../volumes.h"
  10. #include "../disk-io.h"
  11. #include "../block-group.h"
  12. static int free_extent_map_tree(struct btrfs_inode *inode)
  13. {
  14. struct extent_map_tree *em_tree = &inode->extent_tree;
  15. struct extent_map *em;
  16. struct rb_node *node;
  17. int ret = 0;
  18. write_lock(&em_tree->lock);
  19. while (!RB_EMPTY_ROOT(&em_tree->root)) {
  20. node = rb_first(&em_tree->root);
  21. em = rb_entry(node, struct extent_map, rb_node);
  22. remove_extent_mapping(inode, em);
  23. #ifdef CONFIG_BTRFS_DEBUG
  24. if (refcount_read(&em->refs) != 1) {
  25. ret = -EINVAL;
  26. test_err(
  27. "em leak: em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu offset %llu) refs %d",
  28. em->start, em->len, em->disk_bytenr,
  29. em->disk_num_bytes, em->offset,
  30. refcount_read(&em->refs));
  31. refcount_set(&em->refs, 1);
  32. }
  33. #endif
  34. free_extent_map(em);
  35. }
  36. write_unlock(&em_tree->lock);
  37. return ret;
  38. }
  39. /*
  40. * Test scenario:
  41. *
  42. * Suppose that no extent map has been loaded into memory yet, there is a file
  43. * extent [0, 16K), followed by another file extent [16K, 20K), two dio reads
  44. * are entering btrfs_get_extent() concurrently, t1 is reading [8K, 16K), t2 is
  45. * reading [0, 8K)
  46. *
  47. * t1 t2
  48. * btrfs_get_extent() btrfs_get_extent()
  49. * -> lookup_extent_mapping() ->lookup_extent_mapping()
  50. * -> add_extent_mapping(0, 16K)
  51. * -> return em
  52. * ->add_extent_mapping(0, 16K)
  53. * -> #handle -EEXIST
  54. */
static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 start = 0;
	u64 len = SZ_8K;
	int ret;
	int ret2;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->disk_bytenr = 0;
	em->disk_num_bytes = SZ_16K;
	em->ram_bytes = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 16K)");
		goto out;
	}
	/* Drop our reference; the tree keeps its own. */
	free_extent_map(em);

	/* Add [16K, 20K) following [0, 16K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	em->start = SZ_16K;
	em->len = SZ_4K;
	em->disk_bytenr = SZ_32K; /* avoid merging */
	em->disk_num_bytes = SZ_4K;
	em->ram_bytes = SZ_4K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [16K, 20K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * Add [0, 8K), should return [0, 16K) instead: this simulates t2
	 * hitting -EEXIST and being handed back the em t1 already inserted.
	 */
	em->start = start;
	em->len = len;
	em->disk_bytenr = start;
	em->disk_num_bytes = len;
	em->ram_bytes = len;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
		goto out;
	}
	if (!em) {
		test_err("case1 [%llu %llu]: no extent map returned",
			 start, start + len);
		ret = -ENOENT;
		goto out;
	}
	/* The returned em must be the pre-existing [0, 16K) mapping. */
	if (em->start != 0 || extent_map_end(em) != SZ_16K ||
	    em->disk_bytenr != 0 || em->disk_num_bytes != SZ_16K) {
		test_err(
"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu",
			 start, start + len, ret, em->start, em->len,
			 em->disk_bytenr, em->disk_num_bytes);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	/* Always empty the tree; report its leak check if we had no error. */
	ret2 = free_extent_map_tree(inode);
	if (ret == 0)
		ret = ret2;

	return ret;
}
  142. /*
  143. * Test scenario:
  144. *
  145. * Reading the inline ending up with EEXIST, ie. read an inline
  146. * extent and discard page cache and read it again.
  147. */
static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;
	int ret2;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0, 1K), an inline extent. */
	em->start = 0;
	em->len = SZ_1K;
	em->disk_bytenr = EXTENT_MAP_INLINE;
	em->disk_num_bytes = 0;
	em->ram_bytes = SZ_1K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 1K)");
		goto out;
	}
	/* Drop our reference; the tree keeps its own. */
	free_extent_map(em);

	/* Add [4K, 8K) following [0, 1K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->disk_bytenr = SZ_4K;
	em->disk_num_bytes = SZ_4K;
	em->ram_bytes = SZ_4K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * Add [0, 1K) again: simulates re-reading the inline extent after the
	 * page cache was dropped; the -EEXIST must be handled internally and
	 * the existing inline em handed back to us.
	 */
	em->start = 0;
	em->len = SZ_1K;
	em->disk_bytenr = EXTENT_MAP_INLINE;
	em->disk_num_bytes = 0;
	em->ram_bytes = SZ_1K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case2 [0 1K]: ret %d", ret);
		goto out;
	}
	if (!em) {
		test_err("case2 [0 1K]: no extent map returned");
		ret = -ENOENT;
		goto out;
	}
	/* The returned em must be the original inline [0, 1K) mapping. */
	if (em->start != 0 || extent_map_end(em) != SZ_1K ||
	    em->disk_bytenr != EXTENT_MAP_INLINE) {
		test_err(
"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu",
			 ret, em->start, em->len, em->disk_bytenr);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	/* Always empty the tree; report its leak check if we had no error. */
	ret2 = free_extent_map_tree(inode);
	if (ret == 0)
		ret = ret2;

	return ret;
}
/*
 * Insert [4K, 8K), then try to add a larger [0, 16K) em while searching the
 * sub-range [@start, @start + 4K); the helper must hand back an em covering
 * the searched range rather than failing with -EEXIST.
 */
static int __test_case_3(struct btrfs_fs_info *fs_info,
			 struct btrfs_inode *inode, u64 start)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;
	int ret2;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [4K, 8K) */
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->disk_bytenr = SZ_4K;
	em->disk_num_bytes = SZ_4K;
	em->ram_bytes = SZ_4K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	/* Drop our reference; the tree keeps its own. */
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->disk_bytenr = 0;
	em->disk_num_bytes = SZ_16K;
	em->ram_bytes = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case3 [%llu %llu): ret %d",
			 start, start + len, ret);
		goto out;
	}
	if (!em) {
		test_err("case3 [%llu %llu): no extent map returned",
			 start, start + len);
		ret = -ENOENT;
		goto out;
	}
	/*
	 * Since bytes within the em are contiguous, the em's block start
	 * (extent_map_block_start()) is identical to em->start, and the
	 * returned em must cover the searched [start, start + len) range.
	 */
	if (start < em->start || start + len > extent_map_end(em) ||
	    em->start != extent_map_block_start(em)) {
		test_err(
"case3 [%llu %llu): ret %d em (start %llu len %llu disk_bytenr %llu block_len %llu)",
			 start, start + len, ret, em->start, em->len,
			 em->disk_bytenr, em->disk_num_bytes);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	/* Always empty the tree; report its leak check if we had no error. */
	ret2 = free_extent_map_tree(inode);
	if (ret == 0)
		ret = ret2;

	return ret;
}
  303. /*
  304. * Test scenario:
  305. *
  306. * Suppose that no extent map has been loaded into memory yet.
  307. * There is a file extent [0, 16K), two jobs are running concurrently
  308. * against it, t1 is buffered writing to [4K, 8K) and t2 is doing dio
  309. * read from [0, 4K) or [8K, 12K) or [12K, 16K).
  310. *
  311. * t1 goes ahead of t2 and adds em [4K, 8K) into tree.
  312. *
  313. * t1 t2
  314. * cow_file_range() btrfs_get_extent()
  315. * -> lookup_extent_mapping()
  316. * -> add_extent_mapping()
  317. * -> add_extent_mapping()
  318. */
  319. static int test_case_3(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
  320. {
  321. int ret;
  322. ret = __test_case_3(fs_info, inode, 0);
  323. if (ret)
  324. return ret;
  325. ret = __test_case_3(fs_info, inode, SZ_8K);
  326. if (ret)
  327. return ret;
  328. ret = __test_case_3(fs_info, inode, (12 * SZ_1K));
  329. return ret;
  330. }
/*
 * Insert the split pair [0, 8K) and [8K, 32K), then try to add the stale
 * full-range [0, 32K) em while searching [@start, @start + 4K); the helper
 * must return an existing em covering the searched range instead of failing.
 */
static int __test_case_4(struct btrfs_fs_info *fs_info,
			 struct btrfs_inode *inode, u64 start)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;
	int ret2;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Add [0K, 8K) */
	em->start = 0;
	em->len = SZ_8K;
	em->disk_bytenr = 0;
	em->disk_num_bytes = SZ_8K;
	em->ram_bytes = SZ_8K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 8K)");
		goto out;
	}
	/* Drop our reference; the tree keeps its own. */
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	/* Add [8K, 32K) */
	em->start = SZ_8K;
	em->len = 24 * SZ_1K;
	em->disk_bytenr = SZ_16K; /* avoid merging */
	em->disk_num_bytes = 24 * SZ_1K;
	em->ram_bytes = 24 * SZ_1K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [8K, 32K)");
		goto out;
	}
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
	/* Add [0K, 32K), overlapping both mappings inserted above. */
	em->start = 0;
	em->len = SZ_32K;
	em->disk_bytenr = 0;
	em->disk_num_bytes = SZ_32K;
	em->ram_bytes = SZ_32K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case4 [%llu %llu): ret %d",
			 start, start + len, ret);
		goto out;
	}
	if (!em) {
		test_err("case4 [%llu %llu): no extent map returned",
			 start, start + len);
		ret = -ENOENT;
		goto out;
	}
	/* The returned em must cover the searched [start, start + len). */
	if (start < em->start || start + len > extent_map_end(em)) {
		test_err(
"case4 [%llu %llu): ret %d, added wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu)",
			 start, start + len, ret, em->start, em->len,
			 em->disk_bytenr, em->disk_num_bytes);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	/* Always empty the tree; report its leak check if we had no error. */
	ret2 = free_extent_map_tree(inode);
	if (ret == 0)
		ret = ret2;

	return ret;
}
  418. /*
  419. * Test scenario:
  420. *
  421. * Suppose that no extent map has been loaded into memory yet.
  422. * There is a file extent [0, 32K), two jobs are running concurrently
  423. * against it, t1 is doing dio write to [8K, 32K) and t2 is doing dio
  424. * read from [0, 4K) or [4K, 8K).
  425. *
  426. * t1 goes ahead of t2 and splits em [0, 32K) to em [0K, 8K) and [8K 32K).
  427. *
  428. * t1 t2
  429. * btrfs_get_blocks_direct() btrfs_get_blocks_direct()
  430. * -> btrfs_get_extent() -> btrfs_get_extent()
  431. * -> lookup_extent_mapping()
  432. * -> add_extent_mapping() -> lookup_extent_mapping()
  433. * # load [0, 32K)
  434. * -> btrfs_new_extent_direct()
  435. * -> btrfs_drop_extent_cache()
  436. * # split [0, 32K)
  437. * -> add_extent_mapping()
  438. * # add [8K, 32K)
  439. * -> add_extent_mapping()
  440. * # handle -EEXIST when adding
  441. * # [0, 32K)
  442. */
  443. static int test_case_4(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
  444. {
  445. int ret;
  446. ret = __test_case_4(fs_info, inode, 0);
  447. if (ret)
  448. return ret;
  449. ret = __test_case_4(fs_info, inode, SZ_4K);
  450. return ret;
  451. }
  452. static int add_compressed_extent(struct btrfs_inode *inode,
  453. u64 start, u64 len, u64 block_start)
  454. {
  455. struct extent_map_tree *em_tree = &inode->extent_tree;
  456. struct extent_map *em;
  457. int ret;
  458. em = alloc_extent_map();
  459. if (!em) {
  460. test_std_err(TEST_ALLOC_EXTENT_MAP);
  461. return -ENOMEM;
  462. }
  463. em->start = start;
  464. em->len = len;
  465. em->disk_bytenr = block_start;
  466. em->disk_num_bytes = SZ_4K;
  467. em->ram_bytes = len;
  468. em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
  469. write_lock(&em_tree->lock);
  470. ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
  471. write_unlock(&em_tree->lock);
  472. free_extent_map(em);
  473. if (ret < 0) {
  474. test_err("cannot add extent map [%llu, %llu)", start, start + len);
  475. return ret;
  476. }
  477. return 0;
  478. }
  479. struct extent_range {
  480. u64 start;
  481. u64 len;
  482. };
  483. /* The valid states of the tree after every drop, as described below. */
  484. struct extent_range valid_ranges[][7] = {
  485. {
  486. { .start = 0, .len = SZ_8K }, /* [0, 8K) */
  487. { .start = SZ_4K * 3, .len = SZ_4K * 3}, /* [12k, 24k) */
  488. { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */
  489. { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
  490. { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
  491. },
  492. {
  493. { .start = 0, .len = SZ_8K }, /* [0, 8K) */
  494. { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
  495. { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */
  496. { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
  497. { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
  498. },
  499. {
  500. { .start = 0, .len = SZ_8K }, /* [0, 8K) */
  501. { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
  502. { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */
  503. { .start = SZ_32K, .len = SZ_4K}, /* [32k, 36k) */
  504. { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
  505. { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
  506. },
  507. {
  508. { .start = 0, .len = SZ_8K}, /* [0, 8K) */
  509. { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
  510. { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */
  511. }
  512. };
  513. static int validate_range(struct extent_map_tree *em_tree, int index)
  514. {
  515. struct rb_node *n;
  516. int i;
  517. for (i = 0, n = rb_first(&em_tree->root);
  518. valid_ranges[index][i].len && n;
  519. i++, n = rb_next(n)) {
  520. struct extent_map *entry = rb_entry(n, struct extent_map, rb_node);
  521. if (entry->start != valid_ranges[index][i].start) {
  522. test_err("mapping has start %llu expected %llu",
  523. entry->start, valid_ranges[index][i].start);
  524. return -EINVAL;
  525. }
  526. if (entry->len != valid_ranges[index][i].len) {
  527. test_err("mapping has len %llu expected %llu",
  528. entry->len, valid_ranges[index][i].len);
  529. return -EINVAL;
  530. }
  531. }
  532. /*
  533. * We exited because we don't have any more entries in the extent_map
  534. * but we still expect more valid entries.
  535. */
  536. if (valid_ranges[index][i].len) {
  537. test_err("missing an entry");
  538. return -EINVAL;
  539. }
  540. /* We exited the loop but still have entries in the extent map. */
  541. if (n) {
  542. test_err("we have a left over entry in the extent map we didn't expect");
  543. return -EINVAL;
  544. }
  545. return 0;
  546. }
  547. /*
  548. * Test scenario:
  549. *
  550. * Test the various edge cases of btrfs_drop_extent_map_range, create the
  551. * following ranges
  552. *
  553. * [0, 12k)[12k, 24k)[24k, 36k)[36k, 40k)[40k,64k)
  554. *
  555. * And then we'll drop:
  556. *
  557. * [8k, 12k) - test the single front split
  558. * [12k, 20k) - test the single back split
  559. * [28k, 32k) - test the double split
  560. * [32k, 64k) - test whole em dropping
  561. *
  562. * They'll have the EXTENT_FLAG_COMPRESSED flag set to keep the em tree from
  563. * merging the em's.
  564. */
  565. static int test_case_5(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
  566. {
  567. u64 start, end;
  568. int ret;
  569. int ret2;
  570. test_msg("Running btrfs_drop_extent_map_range tests");
  571. /* [0, 12k) */
  572. ret = add_compressed_extent(inode, 0, SZ_4K * 3, 0);
  573. if (ret) {
  574. test_err("cannot add extent range [0, 12K)");
  575. goto out;
  576. }
  577. /* [12k, 24k) */
  578. ret = add_compressed_extent(inode, SZ_4K * 3, SZ_4K * 3, SZ_4K);
  579. if (ret) {
  580. test_err("cannot add extent range [12k, 24k)");
  581. goto out;
  582. }
  583. /* [24k, 36k) */
  584. ret = add_compressed_extent(inode, SZ_4K * 6, SZ_4K * 3, SZ_8K);
  585. if (ret) {
  586. test_err("cannot add extent range [12k, 24k)");
  587. goto out;
  588. }
  589. /* [36k, 40k) */
  590. ret = add_compressed_extent(inode, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
  591. if (ret) {
  592. test_err("cannot add extent range [12k, 24k)");
  593. goto out;
  594. }
  595. /* [40k, 64k) */
  596. ret = add_compressed_extent(inode, SZ_4K * 10, SZ_4K * 6, SZ_16K);
  597. if (ret) {
  598. test_err("cannot add extent range [12k, 24k)");
  599. goto out;
  600. }
  601. /* Drop [8k, 12k) */
  602. start = SZ_8K;
  603. end = (3 * SZ_4K) - 1;
  604. btrfs_drop_extent_map_range(inode, start, end, false);
  605. ret = validate_range(&inode->extent_tree, 0);
  606. if (ret)
  607. goto out;
  608. /* Drop [12k, 20k) */
  609. start = SZ_4K * 3;
  610. end = SZ_16K + SZ_4K - 1;
  611. btrfs_drop_extent_map_range(inode, start, end, false);
  612. ret = validate_range(&inode->extent_tree, 1);
  613. if (ret)
  614. goto out;
  615. /* Drop [28k, 32k) */
  616. start = SZ_32K - SZ_4K;
  617. end = SZ_32K - 1;
  618. btrfs_drop_extent_map_range(inode, start, end, false);
  619. ret = validate_range(&inode->extent_tree, 2);
  620. if (ret)
  621. goto out;
  622. /* Drop [32k, 64k) */
  623. start = SZ_32K;
  624. end = SZ_64K - 1;
  625. btrfs_drop_extent_map_range(inode, start, end, false);
  626. ret = validate_range(&inode->extent_tree, 3);
  627. if (ret)
  628. goto out;
  629. out:
  630. ret2 = free_extent_map_tree(inode);
  631. if (ret == 0)
  632. ret = ret2;
  633. return ret;
  634. }
  635. /*
  636. * Test the btrfs_add_extent_mapping helper which will attempt to create an em
  637. * for areas between two existing ems. Validate it doesn't do this when there
  638. * are two unmerged em's side by side.
  639. */
static int test_case_6(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em = NULL;
	int ret;
	int ret2;

	/* Two unmerged compressed ems side by side: [0, 4K) and [4K, 8K). */
	ret = add_compressed_extent(inode, 0, SZ_4K, 0);
	if (ret)
		goto out;
	ret = add_compressed_extent(inode, SZ_4K, SZ_4K, 0);
	if (ret)
		goto out;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * NOTE(review): disk_num_bytes/ram_bytes are 16K while len is 4K;
	 * presumably intentional since this em should never be inserted as-is
	 * — confirm against btrfs_add_extent_mapping's gap-filling logic.
	 */
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->disk_bytenr = SZ_16K;
	em->disk_num_bytes = SZ_16K;
	em->ram_bytes = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, 0, SZ_8K);
	write_unlock(&em_tree->lock);

	if (ret != 0) {
		test_err("got an error when adding our em: %d", ret);
		goto out;
	}

	ret = -EINVAL;
	/* We must have been handed back the existing [0, 4K) em. */
	if (em->start != 0) {
		test_err("unexpected em->start at %llu, wanted 0", em->start);
		goto out;
	}
	if (em->len != SZ_4K) {
		test_err("unexpected em->len %llu, expected 4K", em->len);
		goto out;
	}
	ret = 0;
out:
	free_extent_map(em);
	/* Always empty the tree; report its leak check if we had no error. */
	ret2 = free_extent_map_tree(inode);
	if (ret == 0)
		ret = ret2;

	return ret;
}
  687. /*
  688. * Regression test for btrfs_drop_extent_map_range. Calling with skip_pinned ==
  689. * true would mess up the start/end calculations and subsequent splits would be
  690. * incorrect.
  691. */
  692. static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
  693. {
  694. struct extent_map_tree *em_tree = &inode->extent_tree;
  695. struct extent_map *em;
  696. int ret;
  697. int ret2;
  698. test_msg("Running btrfs_drop_extent_cache with pinned");
  699. em = alloc_extent_map();
  700. if (!em) {
  701. test_std_err(TEST_ALLOC_EXTENT_MAP);
  702. return -ENOMEM;
  703. }
  704. /* [0, 16K), pinned */
  705. em->start = 0;
  706. em->len = SZ_16K;
  707. em->disk_bytenr = 0;
  708. em->disk_num_bytes = SZ_4K;
  709. em->ram_bytes = SZ_16K;
  710. em->flags |= (EXTENT_FLAG_PINNED | EXTENT_FLAG_COMPRESS_ZLIB);
  711. write_lock(&em_tree->lock);
  712. ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
  713. write_unlock(&em_tree->lock);
  714. if (ret < 0) {
  715. test_err("couldn't add extent map");
  716. goto out;
  717. }
  718. free_extent_map(em);
  719. em = alloc_extent_map();
  720. if (!em) {
  721. test_std_err(TEST_ALLOC_EXTENT_MAP);
  722. ret = -ENOMEM;
  723. goto out;
  724. }
  725. /* [32K, 48K), not pinned */
  726. em->start = SZ_32K;
  727. em->len = SZ_16K;
  728. em->disk_bytenr = SZ_32K;
  729. em->disk_num_bytes = SZ_16K;
  730. em->ram_bytes = SZ_16K;
  731. write_lock(&em_tree->lock);
  732. ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
  733. write_unlock(&em_tree->lock);
  734. if (ret < 0) {
  735. test_err("couldn't add extent map");
  736. goto out;
  737. }
  738. free_extent_map(em);
  739. /*
  740. * Drop [0, 36K) This should skip the [0, 4K) extent and then split the
  741. * [32K, 48K) extent.
  742. */
  743. btrfs_drop_extent_map_range(inode, 0, (36 * SZ_1K) - 1, true);
  744. /* Make sure our extent maps look sane. */
  745. ret = -EINVAL;
  746. em = lookup_extent_mapping(em_tree, 0, SZ_16K);
  747. if (!em) {
  748. test_err("didn't find an em at 0 as expected");
  749. goto out;
  750. }
  751. if (em->start != 0) {
  752. test_err("em->start is %llu, expected 0", em->start);
  753. goto out;
  754. }
  755. if (em->len != SZ_16K) {
  756. test_err("em->len is %llu, expected 16K", em->len);
  757. goto out;
  758. }
  759. free_extent_map(em);
  760. read_lock(&em_tree->lock);
  761. em = lookup_extent_mapping(em_tree, SZ_16K, SZ_16K);
  762. read_unlock(&em_tree->lock);
  763. if (em) {
  764. test_err("found an em when we weren't expecting one");
  765. goto out;
  766. }
  767. read_lock(&em_tree->lock);
  768. em = lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
  769. read_unlock(&em_tree->lock);
  770. if (!em) {
  771. test_err("didn't find an em at 32K as expected");
  772. goto out;
  773. }
  774. if (em->start != (36 * SZ_1K)) {
  775. test_err("em->start is %llu, expected 36K", em->start);
  776. goto out;
  777. }
  778. if (em->len != (12 * SZ_1K)) {
  779. test_err("em->len is %llu, expected 12K", em->len);
  780. goto out;
  781. }
  782. if (extent_map_block_start(em) != SZ_32K + SZ_4K) {
  783. test_err("em->block_start is %llu, expected 36K",
  784. extent_map_block_start(em));
  785. goto out;
  786. }
  787. free_extent_map(em);
  788. read_lock(&em_tree->lock);
  789. em = lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1);
  790. read_unlock(&em_tree->lock);
  791. if (em) {
  792. test_err("found an unexpected em above 48K");
  793. goto out;
  794. }
  795. ret = 0;
  796. out:
  797. free_extent_map(em);
  798. /* Unpin our extent to prevent warning when removing it below. */
  799. ret2 = unpin_extent_cache(inode, 0, SZ_16K, 0);
  800. if (ret == 0)
  801. ret = ret2;
  802. ret2 = free_extent_map_tree(inode);
  803. if (ret == 0)
  804. ret = ret2;
  805. return ret;
  806. }
  807. /*
  808. * Test a regression for compressed extent map adjustment when we attempt to
  809. * add an extent map that is partially overlapped by another existing extent
  810. * map. The resulting extent map offset was left unchanged despite having
  811. * incremented its start offset.
  812. */
static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;
	int ret2;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	/* Compressed extent for the file range [120K, 128K). */
	em->start = SZ_1K * 120;
	em->len = SZ_8K;
	em->disk_num_bytes = SZ_4K;
	em->ram_bytes = SZ_8K;
	em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	free_extent_map(em);
	if (ret < 0) {
		test_err("couldn't add extent map for range [120K, 128K)");
		goto out;
	}

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Compressed extent for the file range [108K, 144K), which overlaps
	 * with the [120K, 128K) we previously inserted.
	 */
	em->start = SZ_1K * 108;
	em->len = SZ_1K * 36;
	em->disk_num_bytes = SZ_4K;
	em->ram_bytes = SZ_1K * 36;
	em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;

	/*
	 * Try to add the extent map but with a search range of [140K, 144K),
	 * this should succeed and adjust the extent map to the range
	 * [128K, 144K), with a length of 16K and an offset of 20K.
	 *
	 * This simulates a scenario where in the subvolume tree of an inode we
	 * have a compressed file extent item for the range [108K, 144K) and we
	 * have an overlapping compressed extent map for the range [120K, 128K),
	 * which was created by an encoded write, but its ordered extent was not
	 * yet completed, so the subvolume tree doesn't have yet the file extent
	 * item for that range - we only have the extent map in the inode's
	 * extent map tree.
	 */
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, SZ_1K * 140, SZ_4K);
	write_unlock(&em_tree->lock);
	/*
	 * NOTE(review): em is dereferenced below after free_extent_map();
	 * presumably safe because on success the tree still holds its own
	 * reference to the em — confirm btrfs_add_extent_mapping's refcount
	 * semantics before relying on this pattern elsewhere.
	 */
	free_extent_map(em);
	if (ret < 0) {
		test_err("couldn't add extent map for range [108K, 144K)");
		goto out;
	}

	if (em->start != SZ_128K) {
		test_err("unexpected extent map start %llu (should be 128K)", em->start);
		ret = -EINVAL;
		goto out;
	}
	if (em->len != SZ_16K) {
		test_err("unexpected extent map length %llu (should be 16K)", em->len);
		ret = -EINVAL;
		goto out;
	}
	/* The regression being tested: offset must track the start bump. */
	if (em->offset != SZ_1K * 20) {
		test_err("unexpected extent map offset %llu (should be 20K)", em->offset);
		ret = -EINVAL;
		goto out;
	}
out:
	/* Always empty the tree; report its leak check if we had no error. */
	ret2 = free_extent_map_tree(inode);
	if (ret == 0)
		ret = ret2;

	return ret;
}
/*
 * One physical->logical reverse-mapping test case, consumed by
 * test_rmap_block().
 */
struct rmap_test_vector {
	/* Block group type/profile flags, copied to the chunk map's type. */
	u64 raid_type;
	/* NOTE(review): not read by test_rmap_block() - possibly unused. */
	u64 physical_start;
	/* Size of each data stripe; also used as the chunk's stripe_size. */
	u64 data_stripe_size;
	/* chunk_len is data_stripe_size * num_data_stripes. */
	u64 num_data_stripes;
	/* Number of stripe entries (devices) in the chunk map. */
	u64 num_stripes;
	/* Assume we won't have more than 5 physical stripes */
	u64 data_stripe_phys_start[5];
	/* Whether btrfs_rmap_block() is expected to map any address. */
	bool expected_mapped_addr;
	/* Physical to logical addresses */
	u64 mapped_logical[5];
};
  907. static int test_rmap_block(struct btrfs_fs_info *fs_info,
  908. struct rmap_test_vector *test)
  909. {
  910. struct btrfs_chunk_map *map;
  911. u64 *logical = NULL;
  912. int i, out_ndaddrs, out_stripe_len;
  913. int ret;
  914. map = btrfs_alloc_chunk_map(test->num_stripes, GFP_KERNEL);
  915. if (!map) {
  916. test_std_err(TEST_ALLOC_CHUNK_MAP);
  917. return -ENOMEM;
  918. }
  919. /* Start at 4GiB logical address */
  920. map->start = SZ_4G;
  921. map->chunk_len = test->data_stripe_size * test->num_data_stripes;
  922. map->stripe_size = test->data_stripe_size;
  923. map->num_stripes = test->num_stripes;
  924. map->type = test->raid_type;
  925. for (i = 0; i < map->num_stripes; i++) {
  926. struct btrfs_device *dev = btrfs_alloc_dummy_device(fs_info);
  927. if (IS_ERR(dev)) {
  928. test_err("cannot allocate device");
  929. ret = PTR_ERR(dev);
  930. goto out;
  931. }
  932. map->stripes[i].dev = dev;
  933. map->stripes[i].physical = test->data_stripe_phys_start[i];
  934. }
  935. ret = btrfs_add_chunk_map(fs_info, map);
  936. if (ret) {
  937. test_err("error adding chunk map to mapping tree");
  938. btrfs_free_chunk_map(map);
  939. goto out_free;
  940. }
  941. ret = btrfs_rmap_block(fs_info, map->start, btrfs_sb_offset(1),
  942. &logical, &out_ndaddrs, &out_stripe_len);
  943. if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) {
  944. test_err("didn't rmap anything but expected %d",
  945. test->expected_mapped_addr);
  946. goto out;
  947. }
  948. if (out_stripe_len != BTRFS_STRIPE_LEN) {
  949. test_err("calculated stripe length doesn't match");
  950. goto out;
  951. }
  952. if (out_ndaddrs != test->expected_mapped_addr) {
  953. for (i = 0; i < out_ndaddrs; i++)
  954. test_msg("mapped %llu", logical[i]);
  955. test_err("unexpected number of mapped addresses: %d", out_ndaddrs);
  956. goto out;
  957. }
  958. for (i = 0; i < out_ndaddrs; i++) {
  959. if (logical[i] != test->mapped_logical[i]) {
  960. test_err("unexpected logical address mapped");
  961. goto out;
  962. }
  963. }
  964. ret = 0;
  965. out:
  966. btrfs_remove_chunk_map(fs_info, map);
  967. out_free:
  968. kfree(logical);
  969. return ret;
  970. }
  971. int btrfs_test_extent_map(void)
  972. {
  973. struct btrfs_fs_info *fs_info = NULL;
  974. struct inode *inode;
  975. struct btrfs_root *root = NULL;
  976. int ret = 0, i;
  977. struct rmap_test_vector rmap_tests[] = {
  978. {
  979. /*
  980. * Test a chunk with 2 data stripes one of which
  981. * intersects the physical address of the super block
  982. * is correctly recognised.
  983. */
  984. .raid_type = BTRFS_BLOCK_GROUP_RAID1,
  985. .physical_start = SZ_64M - SZ_4M,
  986. .data_stripe_size = SZ_256M,
  987. .num_data_stripes = 2,
  988. .num_stripes = 2,
  989. .data_stripe_phys_start =
  990. {SZ_64M - SZ_4M, SZ_64M - SZ_4M + SZ_256M},
  991. .expected_mapped_addr = true,
  992. .mapped_logical= {SZ_4G + SZ_4M}
  993. },
  994. {
  995. /*
  996. * Test that out-of-range physical addresses are
  997. * ignored
  998. */
  999. /* SINGLE chunk type */
  1000. .raid_type = 0,
  1001. .physical_start = SZ_4G,
  1002. .data_stripe_size = SZ_256M,
  1003. .num_data_stripes = 1,
  1004. .num_stripes = 1,
  1005. .data_stripe_phys_start = {SZ_256M},
  1006. .expected_mapped_addr = false,
  1007. .mapped_logical = {0}
  1008. }
  1009. };
  1010. test_msg("running extent_map tests");
  1011. /*
  1012. * Note: the fs_info is not set up completely, we only need
  1013. * fs_info::fsid for the tracepoint.
  1014. */
  1015. fs_info = btrfs_alloc_dummy_fs_info(PAGE_SIZE, PAGE_SIZE);
  1016. if (!fs_info) {
  1017. test_std_err(TEST_ALLOC_FS_INFO);
  1018. return -ENOMEM;
  1019. }
  1020. inode = btrfs_new_test_inode();
  1021. if (!inode) {
  1022. test_std_err(TEST_ALLOC_INODE);
  1023. ret = -ENOMEM;
  1024. goto out;
  1025. }
  1026. root = btrfs_alloc_dummy_root(fs_info);
  1027. if (IS_ERR(root)) {
  1028. test_std_err(TEST_ALLOC_ROOT);
  1029. ret = PTR_ERR(root);
  1030. root = NULL;
  1031. goto out;
  1032. }
  1033. BTRFS_I(inode)->root = root;
  1034. ret = test_case_1(fs_info, BTRFS_I(inode));
  1035. if (ret)
  1036. goto out;
  1037. ret = test_case_2(fs_info, BTRFS_I(inode));
  1038. if (ret)
  1039. goto out;
  1040. ret = test_case_3(fs_info, BTRFS_I(inode));
  1041. if (ret)
  1042. goto out;
  1043. ret = test_case_4(fs_info, BTRFS_I(inode));
  1044. if (ret)
  1045. goto out;
  1046. ret = test_case_5(fs_info, BTRFS_I(inode));
  1047. if (ret)
  1048. goto out;
  1049. ret = test_case_6(fs_info, BTRFS_I(inode));
  1050. if (ret)
  1051. goto out;
  1052. ret = test_case_7(fs_info, BTRFS_I(inode));
  1053. if (ret)
  1054. goto out;
  1055. ret = test_case_8(fs_info, BTRFS_I(inode));
  1056. if (ret)
  1057. goto out;
  1058. test_msg("running rmap tests");
  1059. for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
  1060. ret = test_rmap_block(fs_info, &rmap_tests[i]);
  1061. if (ret)
  1062. goto out;
  1063. }
  1064. out:
  1065. iput(inode);
  1066. btrfs_free_dummy_root(root);
  1067. btrfs_free_dummy_fs_info(fs_info);
  1068. return ret;
  1069. }