tree-checker.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) Qu Wenruo 2017. All rights reserved.
  4. */
  5. /*
  6. * The module is used to catch unexpected/corrupted tree block data.
  7. * Such behavior can be caused either by a fuzzed image or bugs.
  8. *
  9. * The objective is to do leaf/node validation checks when tree block is read
 * from disk, and check *every* possible member, so other code won't
 * need to check them again.
  12. *
 * Due to the potential for unwanted damage, every checker needs to be
 * carefully reviewed; otherwise it could prevent the mount of valid images.
  15. */
  16. #include "ctree.h"
  17. #include "tree-checker.h"
  18. #include "disk-io.h"
  19. #include "compression.h"
  20. #include "volumes.h"
  21. /*
  22. * Error message should follow the following format:
  23. * corrupt <type>: <identifier>, <reason>[, <bad_value>]
  24. *
  25. * @type: leaf or node
  26. * @identifier: the necessary info to locate the leaf/node.
 * It's recommended to decode key.objectid/offset if it's
  28. * meaningful.
  29. * @reason: describe the error
 * @bad_value: optional, it's recommended to output bad value and its
  31. * expected value (range).
  32. *
  33. * Since comma is used to separate the components, only space is allowed
  34. * inside each component.
  35. */
  36. /*
  37. * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
  38. * Allows callers to customize the output.
  39. */
  40. __printf(4, 5)
  41. __cold
  42. static void generic_err(const struct btrfs_fs_info *fs_info,
  43. const struct extent_buffer *eb, int slot,
  44. const char *fmt, ...)
  45. {
  46. struct va_format vaf;
  47. va_list args;
  48. va_start(args, fmt);
  49. vaf.fmt = fmt;
  50. vaf.va = &args;
  51. btrfs_crit(fs_info,
  52. "corrupt %s: root=%llu block=%llu slot=%d, %pV",
  53. btrfs_header_level(eb) == 0 ? "leaf" : "node",
  54. btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
  55. va_end(args);
  56. }
  57. /*
  58. * Customized reporter for extent data item, since its key objectid and
  59. * offset has its own meaning.
  60. */
  61. __printf(4, 5)
  62. __cold
  63. static void file_extent_err(const struct btrfs_fs_info *fs_info,
  64. const struct extent_buffer *eb, int slot,
  65. const char *fmt, ...)
  66. {
  67. struct btrfs_key key;
  68. struct va_format vaf;
  69. va_list args;
  70. btrfs_item_key_to_cpu(eb, &key, slot);
  71. va_start(args, fmt);
  72. vaf.fmt = fmt;
  73. vaf.va = &args;
  74. btrfs_crit(fs_info,
  75. "corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
  76. btrfs_header_level(eb) == 0 ? "leaf" : "node",
  77. btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
  78. key.objectid, key.offset, &vaf);
  79. va_end(args);
  80. }
  81. /*
  82. * Return 0 if the btrfs_file_extent_##name is aligned to @alignment
  83. * Else return 1
  84. */
  85. #define CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, name, alignment) \
  86. ({ \
  87. if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
  88. file_extent_err((fs_info), (leaf), (slot), \
  89. "invalid %s for file extent, have %llu, should be aligned to %u", \
  90. (#name), btrfs_file_extent_##name((leaf), (fi)), \
  91. (alignment)); \
  92. (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))); \
  93. })
  94. static int check_extent_data_item(struct btrfs_fs_info *fs_info,
  95. struct extent_buffer *leaf,
  96. struct btrfs_key *key, int slot)
  97. {
  98. struct btrfs_file_extent_item *fi;
  99. u32 sectorsize = fs_info->sectorsize;
  100. u32 item_size = btrfs_item_size_nr(leaf, slot);
  101. if (!IS_ALIGNED(key->offset, sectorsize)) {
  102. file_extent_err(fs_info, leaf, slot,
  103. "unaligned file_offset for file extent, have %llu should be aligned to %u",
  104. key->offset, sectorsize);
  105. return -EUCLEAN;
  106. }
  107. fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
  108. if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
  109. file_extent_err(fs_info, leaf, slot,
  110. "invalid type for file extent, have %u expect range [0, %u]",
  111. btrfs_file_extent_type(leaf, fi),
  112. BTRFS_FILE_EXTENT_TYPES);
  113. return -EUCLEAN;
  114. }
  115. /*
  116. * Support for new compression/encrption must introduce incompat flag,
  117. * and must be caught in open_ctree().
  118. */
  119. if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
  120. file_extent_err(fs_info, leaf, slot,
  121. "invalid compression for file extent, have %u expect range [0, %u]",
  122. btrfs_file_extent_compression(leaf, fi),
  123. BTRFS_COMPRESS_TYPES);
  124. return -EUCLEAN;
  125. }
  126. if (btrfs_file_extent_encryption(leaf, fi)) {
  127. file_extent_err(fs_info, leaf, slot,
  128. "invalid encryption for file extent, have %u expect 0",
  129. btrfs_file_extent_encryption(leaf, fi));
  130. return -EUCLEAN;
  131. }
  132. if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
  133. /* Inline extent must have 0 as key offset */
  134. if (key->offset) {
  135. file_extent_err(fs_info, leaf, slot,
  136. "invalid file_offset for inline file extent, have %llu expect 0",
  137. key->offset);
  138. return -EUCLEAN;
  139. }
  140. /* Compressed inline extent has no on-disk size, skip it */
  141. if (btrfs_file_extent_compression(leaf, fi) !=
  142. BTRFS_COMPRESS_NONE)
  143. return 0;
  144. /* Uncompressed inline extent size must match item size */
  145. if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
  146. btrfs_file_extent_ram_bytes(leaf, fi)) {
  147. file_extent_err(fs_info, leaf, slot,
  148. "invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
  149. item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
  150. btrfs_file_extent_ram_bytes(leaf, fi));
  151. return -EUCLEAN;
  152. }
  153. return 0;
  154. }
  155. /* Regular or preallocated extent has fixed item size */
  156. if (item_size != sizeof(*fi)) {
  157. file_extent_err(fs_info, leaf, slot,
  158. "invalid item size for reg/prealloc file extent, have %u expect %zu",
  159. item_size, sizeof(*fi));
  160. return -EUCLEAN;
  161. }
  162. if (CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, ram_bytes, sectorsize) ||
  163. CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_bytenr, sectorsize) ||
  164. CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_num_bytes, sectorsize) ||
  165. CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, offset, sectorsize) ||
  166. CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, num_bytes, sectorsize))
  167. return -EUCLEAN;
  168. return 0;
  169. }
  170. static int check_csum_item(struct btrfs_fs_info *fs_info,
  171. struct extent_buffer *leaf, struct btrfs_key *key,
  172. int slot)
  173. {
  174. u32 sectorsize = fs_info->sectorsize;
  175. u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);
  176. if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
  177. generic_err(fs_info, leaf, slot,
  178. "invalid key objectid for csum item, have %llu expect %llu",
  179. key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
  180. return -EUCLEAN;
  181. }
  182. if (!IS_ALIGNED(key->offset, sectorsize)) {
  183. generic_err(fs_info, leaf, slot,
  184. "unaligned key offset for csum item, have %llu should be aligned to %u",
  185. key->offset, sectorsize);
  186. return -EUCLEAN;
  187. }
  188. if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
  189. generic_err(fs_info, leaf, slot,
  190. "unaligned item size for csum item, have %u should be aligned to %u",
  191. btrfs_item_size_nr(leaf, slot), csumsize);
  192. return -EUCLEAN;
  193. }
  194. return 0;
  195. }
  196. /*
  197. * Customized reported for dir_item, only important new info is key->objectid,
  198. * which represents inode number
  199. */
  200. __printf(4, 5)
  201. __cold
  202. static void dir_item_err(const struct btrfs_fs_info *fs_info,
  203. const struct extent_buffer *eb, int slot,
  204. const char *fmt, ...)
  205. {
  206. struct btrfs_key key;
  207. struct va_format vaf;
  208. va_list args;
  209. btrfs_item_key_to_cpu(eb, &key, slot);
  210. va_start(args, fmt);
  211. vaf.fmt = fmt;
  212. vaf.va = &args;
  213. btrfs_crit(fs_info,
  214. "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
  215. btrfs_header_level(eb) == 0 ? "leaf" : "node",
  216. btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
  217. key.objectid, &vaf);
  218. va_end(args);
  219. }
  220. static int check_dir_item(struct btrfs_fs_info *fs_info,
  221. struct extent_buffer *leaf,
  222. struct btrfs_key *key, int slot)
  223. {
  224. struct btrfs_dir_item *di;
  225. u32 item_size = btrfs_item_size_nr(leaf, slot);
  226. u32 cur = 0;
  227. di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
  228. while (cur < item_size) {
  229. u32 name_len;
  230. u32 data_len;
  231. u32 max_name_len;
  232. u32 total_size;
  233. u32 name_hash;
  234. u8 dir_type;
  235. /* header itself should not cross item boundary */
  236. if (cur + sizeof(*di) > item_size) {
  237. dir_item_err(fs_info, leaf, slot,
  238. "dir item header crosses item boundary, have %zu boundary %u",
  239. cur + sizeof(*di), item_size);
  240. return -EUCLEAN;
  241. }
  242. /* dir type check */
  243. dir_type = btrfs_dir_type(leaf, di);
  244. if (dir_type >= BTRFS_FT_MAX) {
  245. dir_item_err(fs_info, leaf, slot,
  246. "invalid dir item type, have %u expect [0, %u)",
  247. dir_type, BTRFS_FT_MAX);
  248. return -EUCLEAN;
  249. }
  250. if (key->type == BTRFS_XATTR_ITEM_KEY &&
  251. dir_type != BTRFS_FT_XATTR) {
  252. dir_item_err(fs_info, leaf, slot,
  253. "invalid dir item type for XATTR key, have %u expect %u",
  254. dir_type, BTRFS_FT_XATTR);
  255. return -EUCLEAN;
  256. }
  257. if (dir_type == BTRFS_FT_XATTR &&
  258. key->type != BTRFS_XATTR_ITEM_KEY) {
  259. dir_item_err(fs_info, leaf, slot,
  260. "xattr dir type found for non-XATTR key");
  261. return -EUCLEAN;
  262. }
  263. if (dir_type == BTRFS_FT_XATTR)
  264. max_name_len = XATTR_NAME_MAX;
  265. else
  266. max_name_len = BTRFS_NAME_LEN;
  267. /* Name/data length check */
  268. name_len = btrfs_dir_name_len(leaf, di);
  269. data_len = btrfs_dir_data_len(leaf, di);
  270. if (name_len > max_name_len) {
  271. dir_item_err(fs_info, leaf, slot,
  272. "dir item name len too long, have %u max %u",
  273. name_len, max_name_len);
  274. return -EUCLEAN;
  275. }
  276. if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
  277. dir_item_err(fs_info, leaf, slot,
  278. "dir item name and data len too long, have %u max %u",
  279. name_len + data_len,
  280. BTRFS_MAX_XATTR_SIZE(fs_info));
  281. return -EUCLEAN;
  282. }
  283. if (data_len && dir_type != BTRFS_FT_XATTR) {
  284. dir_item_err(fs_info, leaf, slot,
  285. "dir item with invalid data len, have %u expect 0",
  286. data_len);
  287. return -EUCLEAN;
  288. }
  289. total_size = sizeof(*di) + name_len + data_len;
  290. /* header and name/data should not cross item boundary */
  291. if (cur + total_size > item_size) {
  292. dir_item_err(fs_info, leaf, slot,
  293. "dir item data crosses item boundary, have %u boundary %u",
  294. cur + total_size, item_size);
  295. return -EUCLEAN;
  296. }
  297. /*
  298. * Special check for XATTR/DIR_ITEM, as key->offset is name
  299. * hash, should match its name
  300. */
  301. if (key->type == BTRFS_DIR_ITEM_KEY ||
  302. key->type == BTRFS_XATTR_ITEM_KEY) {
  303. char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
  304. read_extent_buffer(leaf, namebuf,
  305. (unsigned long)(di + 1), name_len);
  306. name_hash = btrfs_name_hash(namebuf, name_len);
  307. if (key->offset != name_hash) {
  308. dir_item_err(fs_info, leaf, slot,
  309. "name hash mismatch with key, have 0x%016x expect 0x%016llx",
  310. name_hash, key->offset);
  311. return -EUCLEAN;
  312. }
  313. }
  314. cur += total_size;
  315. di = (struct btrfs_dir_item *)((void *)di + total_size);
  316. }
  317. return 0;
  318. }
  319. __printf(4, 5)
  320. __cold
  321. static void block_group_err(const struct btrfs_fs_info *fs_info,
  322. const struct extent_buffer *eb, int slot,
  323. const char *fmt, ...)
  324. {
  325. struct btrfs_key key;
  326. struct va_format vaf;
  327. va_list args;
  328. btrfs_item_key_to_cpu(eb, &key, slot);
  329. va_start(args, fmt);
  330. vaf.fmt = fmt;
  331. vaf.va = &args;
  332. btrfs_crit(fs_info,
  333. "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
  334. btrfs_header_level(eb) == 0 ? "leaf" : "node",
  335. btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
  336. key.objectid, key.offset, &vaf);
  337. va_end(args);
  338. }
  339. static int check_block_group_item(struct btrfs_fs_info *fs_info,
  340. struct extent_buffer *leaf,
  341. struct btrfs_key *key, int slot)
  342. {
  343. struct btrfs_block_group_item bgi;
  344. u32 item_size = btrfs_item_size_nr(leaf, slot);
  345. u64 flags;
  346. u64 type;
  347. /*
  348. * Here we don't really care about alignment since extent allocator can
  349. * handle it. We care more about the size.
  350. */
  351. if (key->offset == 0) {
  352. block_group_err(fs_info, leaf, slot,
  353. "invalid block group size 0");
  354. return -EUCLEAN;
  355. }
  356. if (item_size != sizeof(bgi)) {
  357. block_group_err(fs_info, leaf, slot,
  358. "invalid item size, have %u expect %zu",
  359. item_size, sizeof(bgi));
  360. return -EUCLEAN;
  361. }
  362. read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
  363. sizeof(bgi));
  364. if (btrfs_block_group_chunk_objectid(&bgi) !=
  365. BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
  366. block_group_err(fs_info, leaf, slot,
  367. "invalid block group chunk objectid, have %llu expect %llu",
  368. btrfs_block_group_chunk_objectid(&bgi),
  369. BTRFS_FIRST_CHUNK_TREE_OBJECTID);
  370. return -EUCLEAN;
  371. }
  372. if (btrfs_block_group_used(&bgi) > key->offset) {
  373. block_group_err(fs_info, leaf, slot,
  374. "invalid block group used, have %llu expect [0, %llu)",
  375. btrfs_block_group_used(&bgi), key->offset);
  376. return -EUCLEAN;
  377. }
  378. flags = btrfs_block_group_flags(&bgi);
  379. if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
  380. block_group_err(fs_info, leaf, slot,
  381. "invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
  382. flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
  383. hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
  384. return -EUCLEAN;
  385. }
  386. type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
  387. if (type != BTRFS_BLOCK_GROUP_DATA &&
  388. type != BTRFS_BLOCK_GROUP_METADATA &&
  389. type != BTRFS_BLOCK_GROUP_SYSTEM &&
  390. type != (BTRFS_BLOCK_GROUP_METADATA |
  391. BTRFS_BLOCK_GROUP_DATA)) {
  392. block_group_err(fs_info, leaf, slot,
  393. "invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
  394. type, hweight64(type),
  395. BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
  396. BTRFS_BLOCK_GROUP_SYSTEM,
  397. BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
  398. return -EUCLEAN;
  399. }
  400. return 0;
  401. }
  402. __printf(5, 6)
  403. __cold
  404. static void chunk_err(const struct btrfs_fs_info *fs_info,
  405. const struct extent_buffer *leaf,
  406. const struct btrfs_chunk *chunk, u64 logical,
  407. const char *fmt, ...)
  408. {
  409. bool is_sb;
  410. struct va_format vaf;
  411. va_list args;
  412. int i;
  413. int slot = -1;
  414. /* Only superblock eb is able to have such small offset */
  415. is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
  416. if (!is_sb) {
  417. /*
  418. * Get the slot number by iterating through all slots, this
  419. * would provide better readability.
  420. */
  421. for (i = 0; i < btrfs_header_nritems(leaf); i++) {
  422. if (btrfs_item_ptr_offset(leaf, i) ==
  423. (unsigned long)chunk) {
  424. slot = i;
  425. break;
  426. }
  427. }
  428. }
  429. va_start(args, fmt);
  430. vaf.fmt = fmt;
  431. vaf.va = &args;
  432. if (is_sb)
  433. btrfs_crit(fs_info,
  434. "corrupt superblock syschunk array: chunk_start=%llu, %pV",
  435. logical, &vaf);
  436. else
  437. btrfs_crit(fs_info,
  438. "corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
  439. BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
  440. logical, &vaf);
  441. va_end(args);
  442. }
  443. /*
  444. * The common chunk check which could also work on super block sys chunk array.
  445. *
  446. * Return -EUCLEAN if anything is corrupted.
  447. * Return 0 if everything is OK.
  448. */
  449. int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
  450. struct extent_buffer *leaf,
  451. struct btrfs_chunk *chunk, u64 logical)
  452. {
  453. u64 length;
  454. u64 stripe_len;
  455. u16 num_stripes;
  456. u16 sub_stripes;
  457. u64 type;
  458. u64 features;
  459. bool mixed = false;
  460. length = btrfs_chunk_length(leaf, chunk);
  461. stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
  462. num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
  463. sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
  464. type = btrfs_chunk_type(leaf, chunk);
  465. if (!num_stripes) {
  466. chunk_err(fs_info, leaf, chunk, logical,
  467. "invalid chunk num_stripes, have %u", num_stripes);
  468. return -EUCLEAN;
  469. }
  470. if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
  471. chunk_err(fs_info, leaf, chunk, logical,
  472. "invalid chunk logical, have %llu should aligned to %u",
  473. logical, fs_info->sectorsize);
  474. return -EUCLEAN;
  475. }
  476. if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
  477. chunk_err(fs_info, leaf, chunk, logical,
  478. "invalid chunk sectorsize, have %u expect %u",
  479. btrfs_chunk_sector_size(leaf, chunk),
  480. fs_info->sectorsize);
  481. return -EUCLEAN;
  482. }
  483. if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
  484. chunk_err(fs_info, leaf, chunk, logical,
  485. "invalid chunk length, have %llu", length);
  486. return -EUCLEAN;
  487. }
  488. if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
  489. chunk_err(fs_info, leaf, chunk, logical,
  490. "invalid chunk stripe length: %llu",
  491. stripe_len);
  492. return -EUCLEAN;
  493. }
  494. if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
  495. type) {
  496. chunk_err(fs_info, leaf, chunk, logical,
  497. "unrecognized chunk type: 0x%llx",
  498. ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
  499. BTRFS_BLOCK_GROUP_PROFILE_MASK) &
  500. btrfs_chunk_type(leaf, chunk));
  501. return -EUCLEAN;
  502. }
  503. if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
  504. (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
  505. chunk_err(fs_info, leaf, chunk, logical,
  506. "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
  507. type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
  508. return -EUCLEAN;
  509. }
  510. if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
  511. chunk_err(fs_info, leaf, chunk, logical,
  512. "missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
  513. type, BTRFS_BLOCK_GROUP_TYPE_MASK);
  514. return -EUCLEAN;
  515. }
  516. if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
  517. (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
  518. chunk_err(fs_info, leaf, chunk, logical,
  519. "system chunk with data or metadata type: 0x%llx",
  520. type);
  521. return -EUCLEAN;
  522. }
  523. features = btrfs_super_incompat_flags(fs_info->super_copy);
  524. if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
  525. mixed = true;
  526. if (!mixed) {
  527. if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
  528. (type & BTRFS_BLOCK_GROUP_DATA)) {
  529. chunk_err(fs_info, leaf, chunk, logical,
  530. "mixed chunk type in non-mixed mode: 0x%llx", type);
  531. return -EUCLEAN;
  532. }
  533. }
  534. if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
  535. (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
  536. (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
  537. (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
  538. (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
  539. ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
  540. chunk_err(fs_info, leaf, chunk, logical,
  541. "invalid num_stripes:sub_stripes %u:%u for profile %llu",
  542. num_stripes, sub_stripes,
  543. type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
  544. return -EUCLEAN;
  545. }
  546. return 0;
  547. }
  548. __printf(4, 5)
  549. __cold
  550. static void dev_item_err(const struct btrfs_fs_info *fs_info,
  551. const struct extent_buffer *eb, int slot,
  552. const char *fmt, ...)
  553. {
  554. struct btrfs_key key;
  555. struct va_format vaf;
  556. va_list args;
  557. btrfs_item_key_to_cpu(eb, &key, slot);
  558. va_start(args, fmt);
  559. vaf.fmt = fmt;
  560. vaf.va = &args;
  561. btrfs_crit(fs_info,
  562. "corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
  563. btrfs_header_level(eb) == 0 ? "leaf" : "node",
  564. btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
  565. key.objectid, &vaf);
  566. va_end(args);
  567. }
  568. static int check_dev_item(struct btrfs_fs_info *fs_info,
  569. struct extent_buffer *leaf,
  570. struct btrfs_key *key, int slot)
  571. {
  572. struct btrfs_dev_item *ditem;
  573. if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
  574. dev_item_err(fs_info, leaf, slot,
  575. "invalid objectid: has=%llu expect=%llu",
  576. key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
  577. return -EUCLEAN;
  578. }
  579. ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
  580. if (btrfs_device_id(leaf, ditem) != key->offset) {
  581. dev_item_err(fs_info, leaf, slot,
  582. "devid mismatch: key has=%llu item has=%llu",
  583. key->offset, btrfs_device_id(leaf, ditem));
  584. return -EUCLEAN;
  585. }
  586. /*
  587. * For device total_bytes, we don't have reliable way to check it, as
  588. * it can be 0 for device removal. Device size check can only be done
  589. * by dev extents check.
  590. */
  591. if (btrfs_device_bytes_used(leaf, ditem) >
  592. btrfs_device_total_bytes(leaf, ditem)) {
  593. dev_item_err(fs_info, leaf, slot,
  594. "invalid bytes used: have %llu expect [0, %llu]",
  595. btrfs_device_bytes_used(leaf, ditem),
  596. btrfs_device_total_bytes(leaf, ditem));
  597. return -EUCLEAN;
  598. }
  599. /*
  600. * Remaining members like io_align/type/gen/dev_group aren't really
  601. * utilized. Skip them to make later usage of them easier.
  602. */
  603. return 0;
  604. }
  605. /* Inode item error output has the same format as dir_item_err() */
  606. #define inode_item_err(fs_info, eb, slot, fmt, ...) \
  607. dir_item_err(fs_info, eb, slot, fmt, __VA_ARGS__)
  608. static int check_inode_item(struct btrfs_fs_info *fs_info,
  609. struct extent_buffer *leaf,
  610. struct btrfs_key *key, int slot)
  611. {
  612. struct btrfs_inode_item *iitem;
  613. u64 super_gen = btrfs_super_generation(fs_info->super_copy);
  614. u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
  615. u32 mode;
  616. if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
  617. key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
  618. key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
  619. key->objectid != BTRFS_FREE_INO_OBJECTID) {
  620. generic_err(fs_info, leaf, slot,
  621. "invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
  622. key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
  623. BTRFS_FIRST_FREE_OBJECTID,
  624. BTRFS_LAST_FREE_OBJECTID,
  625. BTRFS_FREE_INO_OBJECTID);
  626. return -EUCLEAN;
  627. }
  628. if (key->offset != 0) {
  629. inode_item_err(fs_info, leaf, slot,
  630. "invalid key offset: has %llu expect 0",
  631. key->offset);
  632. return -EUCLEAN;
  633. }
  634. iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
  635. /* Here we use super block generation + 1 to handle log tree */
  636. if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
  637. inode_item_err(fs_info, leaf, slot,
  638. "invalid inode generation: has %llu expect (0, %llu]",
  639. btrfs_inode_generation(leaf, iitem),
  640. super_gen + 1);
  641. return -EUCLEAN;
  642. }
  643. /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
  644. if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
  645. inode_item_err(fs_info, leaf, slot,
  646. "invalid inode transid: has %llu expect [0, %llu]",
  647. btrfs_inode_transid(leaf, iitem), super_gen + 1);
  648. return -EUCLEAN;
  649. }
  650. /*
  651. * For size and nbytes it's better not to be too strict, as for dir
  652. * item its size/nbytes can easily get wrong, but doesn't affect
  653. * anything in the fs. So here we skip the check.
  654. */
  655. mode = btrfs_inode_mode(leaf, iitem);
  656. if (mode & ~valid_mask) {
  657. inode_item_err(fs_info, leaf, slot,
  658. "unknown mode bit detected: 0x%x",
  659. mode & ~valid_mask);
  660. return -EUCLEAN;
  661. }
  662. /*
  663. * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
  664. * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
  665. * Only needs to check BLK, LNK and SOCKS
  666. */
  667. if (!is_power_of_2(mode & S_IFMT)) {
  668. if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
  669. inode_item_err(fs_info, leaf, slot,
  670. "invalid mode: has 0%o expect valid S_IF* bit(s)",
  671. mode & S_IFMT);
  672. return -EUCLEAN;
  673. }
  674. }
  675. if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
  676. inode_item_err(fs_info, leaf, slot,
  677. "invalid nlink: has %u expect no more than 1 for dir",
  678. btrfs_inode_nlink(leaf, iitem));
  679. return -EUCLEAN;
  680. }
  681. if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
  682. inode_item_err(fs_info, leaf, slot,
  683. "unknown flags detected: 0x%llx",
  684. btrfs_inode_flags(leaf, iitem) &
  685. ~BTRFS_INODE_FLAG_MASK);
  686. return -EUCLEAN;
  687. }
  688. return 0;
  689. }
  690. /*
  691. * Common point to switch the item-specific validation.
  692. */
  693. static int check_leaf_item(struct btrfs_fs_info *fs_info,
  694. struct extent_buffer *leaf,
  695. struct btrfs_key *key, int slot)
  696. {
  697. int ret = 0;
  698. struct btrfs_chunk *chunk;
  699. switch (key->type) {
  700. case BTRFS_EXTENT_DATA_KEY:
  701. ret = check_extent_data_item(fs_info, leaf, key, slot);
  702. break;
  703. case BTRFS_EXTENT_CSUM_KEY:
  704. ret = check_csum_item(fs_info, leaf, key, slot);
  705. break;
  706. case BTRFS_DIR_ITEM_KEY:
  707. case BTRFS_DIR_INDEX_KEY:
  708. case BTRFS_XATTR_ITEM_KEY:
  709. ret = check_dir_item(fs_info, leaf, key, slot);
  710. break;
  711. case BTRFS_BLOCK_GROUP_ITEM_KEY:
  712. ret = check_block_group_item(fs_info, leaf, key, slot);
  713. break;
  714. case BTRFS_CHUNK_ITEM_KEY:
  715. chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
  716. ret = btrfs_check_chunk_valid(fs_info, leaf, chunk,
  717. key->offset);
  718. break;
  719. case BTRFS_DEV_ITEM_KEY:
  720. ret = check_dev_item(fs_info, leaf, key, slot);
  721. break;
  722. case BTRFS_INODE_ITEM_KEY:
  723. ret = check_inode_item(fs_info, leaf, key, slot);
  724. break;
  725. }
  726. return ret;
  727. }
/*
 * Validate a leaf extent buffer.
 *
 * Verifies, in order:
 *   1) header level is 0
 *   2) emptiness rules (certain trees must never have an empty leaf)
 *   3) key ordering across slots
 *   4) item offset/size consistency (no overlap, no hole, all inside leaf)
 *   5) optionally, per-item content via check_leaf_item()
 *
 * @fs_info:         filesystem info, used for leaf geometry and error reports
 * @leaf:            the extent buffer to validate
 * @check_item_data: when true, also run the per-item content checks
 *
 * Returns 0 if the leaf passes, -EUCLEAN (or a negative error from
 * check_leaf_item()) on detected corruption.
 */
static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
		      bool check_item_data)
{
	/* No valid key type is 0, so all key should be larger than this key */
	struct btrfs_key prev_key = {0, 0, 0};
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (btrfs_header_level(leaf) != 0) {
		generic_err(fs_info, leaf, 0,
			"invalid level for leaf, have %d expect 0",
			btrfs_header_level(leaf));
		return -EUCLEAN;
	}

	/*
	 * Extent buffers from a relocation tree have a owner field that
	 * corresponds to the subvolume tree they are based on. So just from an
	 * extent buffer alone we can not find out what is the id of the
	 * corresponding subvolume tree, so we can not figure out if the extent
	 * buffer corresponds to the root of the relocation tree or not. So
	 * skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		u64 owner = btrfs_header_owner(leaf);
		struct btrfs_root *check_root;

		/* These trees must never be empty */
		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
		    owner == BTRFS_DEV_TREE_OBJECTID ||
		    owner == BTRFS_FS_TREE_OBJECTID ||
		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
			generic_err(fs_info, leaf, 0,
			"invalid root, root %llu must never be empty",
				    owner);
			return -EUCLEAN;
		}
		/* Unknown tree */
		if (owner == 0) {
			generic_err(fs_info, leaf, 0,
				"invalid owner, root 0 is not defined");
			return -EUCLEAN;
		}

		/*
		 * An empty leaf is only valid if it is the root of its tree.
		 * Look up the owning root to compare against its root node.
		 */
		key.objectid = owner;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		check_root = btrfs_get_fs_root(fs_info, &key, false);
		/*
		 * The only reason we also check NULL here is that during
		 * open_ctree() some roots has not yet been set up.
		 */
		if (!IS_ERR_OR_NULL(check_root)) {
			struct extent_buffer *eb;

			eb = btrfs_root_node(check_root);
			/* if leaf is the root, then it's fine */
			if (leaf != eb) {
				generic_err(fs_info, leaf, 0,
		"invalid nritems, have %u should not be 0 for non-root leaf",
					nritems);
				/* drop the reference taken by btrfs_root_node() */
				free_extent_buffer(eb);
				return -EUCLEAN;
			}
			free_extent_buffer(eb);
		}
		return 0;
	}

	if (nritems == 0)
		return 0;

	/*
	 * Check the following things to make sure this is a good leaf, and
	 * leaf users won't need to bother with similar sanity checks:
	 *
	 * 1) key ordering
	 * 2) item offset and size
	 *    No overlap, no hole, all inside the leaf.
	 * 3) item content
	 *    If possible, do comprehensive sanity check.
	 *    NOTE: All checks must only rely on the item data itself.
	 */
	for (slot = 0; slot < nritems; slot++) {
		u32 item_end_expected;
		int ret;

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
			generic_err(fs_info, leaf, slot,
	"bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
				prev_key.objectid, prev_key.type,
				prev_key.offset, key.objectid, key.type,
				key.offset);
			return -EUCLEAN;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (slot == 0)
			item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
		else
			item_end_expected = btrfs_item_offset_nr(leaf,
								 slot - 1);
		if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
			generic_err(fs_info, leaf, slot,
				"unexpected item end, have %u expect %u",
				btrfs_item_end_nr(leaf, slot),
				item_end_expected);
			return -EUCLEAN;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent to each other, but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
			generic_err(fs_info, leaf, slot,
			"slot end outside of leaf, have %u expect range [0, %u]",
				btrfs_item_end_nr(leaf, slot),
				BTRFS_LEAF_DATA_SIZE(fs_info));
			return -EUCLEAN;
		}

		/* Also check if the item pointer overlaps with btrfs item. */
		if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
		    btrfs_item_ptr_offset(leaf, slot)) {
			generic_err(fs_info, leaf, slot,
		"slot overlaps with its data, item end %lu data start %lu",
				btrfs_item_nr_offset(slot) +
				sizeof(struct btrfs_item),
				btrfs_item_ptr_offset(leaf, slot));
			return -EUCLEAN;
		}

		if (check_item_data) {
			/*
			 * Check if the item size and content meet other
			 * criteria
			 */
			ret = check_leaf_item(fs_info, leaf, &key, slot);
			if (ret < 0)
				return ret;
		}

		/* Remember this key for the ordering check of the next slot */
		prev_key.objectid = key.objectid;
		prev_key.type = key.type;
		prev_key.offset = key.offset;
	}

	return 0;
}
  875. int btrfs_check_leaf_full(struct btrfs_fs_info *fs_info,
  876. struct extent_buffer *leaf)
  877. {
  878. return check_leaf(fs_info, leaf, true);
  879. }
  880. int btrfs_check_leaf_relaxed(struct btrfs_fs_info *fs_info,
  881. struct extent_buffer *leaf)
  882. {
  883. return check_leaf(fs_info, leaf, false);
  884. }
  885. int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node)
  886. {
  887. unsigned long nr = btrfs_header_nritems(node);
  888. struct btrfs_key key, next_key;
  889. int slot;
  890. int level = btrfs_header_level(node);
  891. u64 bytenr;
  892. int ret = 0;
  893. if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
  894. generic_err(fs_info, node, 0,
  895. "invalid level for node, have %d expect [1, %d]",
  896. level, BTRFS_MAX_LEVEL - 1);
  897. return -EUCLEAN;
  898. }
  899. if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
  900. btrfs_crit(fs_info,
  901. "corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
  902. btrfs_header_owner(node), node->start,
  903. nr == 0 ? "small" : "large", nr,
  904. BTRFS_NODEPTRS_PER_BLOCK(fs_info));
  905. return -EUCLEAN;
  906. }
  907. for (slot = 0; slot < nr - 1; slot++) {
  908. bytenr = btrfs_node_blockptr(node, slot);
  909. btrfs_node_key_to_cpu(node, &key, slot);
  910. btrfs_node_key_to_cpu(node, &next_key, slot + 1);
  911. if (!bytenr) {
  912. generic_err(fs_info, node, slot,
  913. "invalid NULL node pointer");
  914. ret = -EUCLEAN;
  915. goto out;
  916. }
  917. if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
  918. generic_err(fs_info, node, slot,
  919. "unaligned pointer, have %llu should be aligned to %u",
  920. bytenr, fs_info->sectorsize);
  921. ret = -EUCLEAN;
  922. goto out;
  923. }
  924. if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
  925. generic_err(fs_info, node, slot,
  926. "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
  927. key.objectid, key.type, key.offset,
  928. next_key.objectid, next_key.type,
  929. next_key.offset);
  930. ret = -EUCLEAN;
  931. goto out;
  932. }
  933. }
  934. out:
  935. return ret;
  936. }