/* btrfs tree modification log (tree-mod-log.c) */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include "messages.h"
  3. #include "tree-mod-log.h"
  4. #include "disk-io.h"
  5. #include "fs.h"
  6. #include "accessors.h"
  7. #include "tree-checker.h"
/* Snapshot of a root node, recorded for a root replace operation. */
struct tree_mod_root {
	u64 logical;	/* Logical address of the old root node. */
	u8 level;	/* Level of the old root node. */
};
/*
 * One logged modification of a tree block. Elements live in
 * fs_info::tree_mod_log, keyed by (logical, seq).
 */
struct tree_mod_elem {
	struct rb_node node;		/* Anchor in fs_info::tree_mod_log. */
	u64 logical;			/* Logical address of the affected block. */
	u64 seq;			/* Sequence number, assigned at insertion. */
	enum btrfs_mod_log_op op;	/* Which kind of modification this records. */

	/*
	 * This is used for BTRFS_MOD_LOG_KEY_* and BTRFS_MOD_LOG_MOVE_KEYS
	 * operations.
	 */
	int slot;

	/* This is used for BTRFS_MOD_LOG_KEY* and BTRFS_MOD_LOG_ROOT_REPLACE. */
	u64 generation;

	/* Those are used for op == BTRFS_MOD_LOG_KEY_{REPLACE,REMOVE}. */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */
	struct tree_mod_root old_root;
};
/*
 * Pull a new tree mod seq number for our operation.
 */
static u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	/* Atomic increment-and-return, so every caller gets a unique value. */
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct btrfs_seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	if (!elem->seq) {
		/* First time for this blocker: register it and flag users. */
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
		set_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);
	}
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}
/*
 * Drop the blocker @elem. If it was the oldest blocker, prune all log
 * elements whose sequence number is below the new minimum (or the whole
 * log if no blockers remain).
 */
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct tree_mod_elem *tm;
	u64 min_seq = BTRFS_SEQ_LAST;
	u64 seq_putting = elem->seq;

	/* A zero seq means this blocker was never registered. */
	if (!seq_putting)
		return;

	write_lock(&fs_info->tree_mod_log_lock);
	list_del(&elem->list);
	elem->seq = 0;

	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/* No blockers left, nothing prevents draining the log. */
		clear_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);
	} else {
		struct btrfs_seq_list *first;

		first = list_first_entry(&fs_info->tree_mod_seq_list,
					 struct btrfs_seq_list, list);
		if (seq_putting > first->seq) {
			/*
			 * Blocker with lower sequence number exists, we cannot
			 * remove anything from the log.
			 */
			write_unlock(&fs_info->tree_mod_log_lock);
			return;
		}
		min_seq = first->seq;
	}

	/*
	 * Anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		/* Grab the successor before erasing the current node. */
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq >= min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * Key order of the log:
 * node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node for root
 * replace operations, or the logical address of the affected block for all
 * other operations.
 */
static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info,
					struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	lockdep_assert_held_write(&fs_info->tree_mod_log_lock);

	/* Assign the sequence number only now, under the write lock. */
	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		/*
		 * Descend comparing (logical, seq). The branch direction here
		 * must stay in sync with __tree_mod_log_search().
		 */
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
  144. /*
  145. * Determines if logging can be omitted. Returns true if it can. Otherwise, it
  146. * returns false with the tree_mod_log_lock acquired. The caller must hold
  147. * this until all tree mod log insertions are recorded in the rb tree and then
  148. * write unlock fs_info::tree_mod_log_lock.
  149. */
  150. static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, const struct extent_buffer *eb)
  151. {
  152. if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
  153. return true;
  154. if (eb && btrfs_header_level(eb) == 0)
  155. return true;
  156. write_lock(&fs_info->tree_mod_log_lock);
  157. if (list_empty(&(fs_info)->tree_mod_seq_list)) {
  158. write_unlock(&fs_info->tree_mod_log_lock);
  159. return true;
  160. }
  161. return false;
  162. }
  163. /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
  164. static bool tree_mod_need_log(const struct btrfs_fs_info *fs_info,
  165. const struct extent_buffer *eb)
  166. {
  167. if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
  168. return false;
  169. if (eb && btrfs_header_level(eb) == 0)
  170. return false;
  171. return true;
  172. }
  173. static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb,
  174. int slot,
  175. enum btrfs_mod_log_op op)
  176. {
  177. struct tree_mod_elem *tm;
  178. tm = kzalloc(sizeof(*tm), GFP_NOFS);
  179. if (!tm)
  180. return NULL;
  181. tm->logical = eb->start;
  182. if (op != BTRFS_MOD_LOG_KEY_ADD) {
  183. btrfs_node_key(eb, &tm->key, slot);
  184. tm->blockptr = btrfs_node_blockptr(eb, slot);
  185. }
  186. tm->op = op;
  187. tm->slot = slot;
  188. tm->generation = btrfs_node_ptr_generation(eb, slot);
  189. RB_CLEAR_NODE(&tm->node);
  190. return tm;
  191. }
/*
 * Record a single key operation @op on slot @slot of node @eb.
 *
 * Returns 0 on success (or when logging is not needed), -ENOMEM if the
 * allocation failed while logging was required.
 */
int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot,
				  enum btrfs_mod_log_op op)
{
	struct tree_mod_elem *tm;
	int ret = 0;

	/* Unlocked fast path: nothing to do if nobody is using the log. */
	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	/* Allocate before taking the lock; a failure is resolved below. */
	tm = alloc_tree_mod_elem(eb, slot, op);
	if (!tm)
		ret = -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		/*
		 * Don't error if we failed to allocate memory because we don't
		 * need to log.
		 */
		return 0;
	} else if (ret != 0) {
		/*
		 * We previously failed to allocate memory and we need to log,
		 * so we have to fail.
		 */
		goto out_unlock;
	}

	ret = tree_mod_log_insert(eb->fs_info, tm);
out_unlock:
	/* tree_mod_dont_log() returned false with the write lock held. */
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}
  223. static struct tree_mod_elem *tree_mod_log_alloc_move(const struct extent_buffer *eb,
  224. int dst_slot, int src_slot,
  225. int nr_items)
  226. {
  227. struct tree_mod_elem *tm;
  228. tm = kzalloc(sizeof(*tm), GFP_NOFS);
  229. if (!tm)
  230. return ERR_PTR(-ENOMEM);
  231. tm->logical = eb->start;
  232. tm->slot = src_slot;
  233. tm->move.dst_slot = dst_slot;
  234. tm->move.nr_items = nr_items;
  235. tm->op = BTRFS_MOD_LOG_MOVE_KEYS;
  236. RB_CLEAR_NODE(&tm->node);
  237. return tm;
  238. }
/*
 * Record a move of @nr_items key pointers inside @eb from @src_slot to
 * @dst_slot. When the move goes towards the beginning of the buffer
 * (dst_slot < src_slot) the overwritten slots are additionally logged as
 * removals, so a rewind can restore them.
 *
 * Returns 0 on success (or when logging is not needed) and a negative errno
 * on failure.
 */
int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb,
				   int dst_slot, int src_slot,
				   int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	bool locked = false;

	/* Unlocked fast path: nothing to do if nobody is using the log. */
	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list) {
		ret = -ENOMEM;
		goto lock;
	}

	tm = tree_mod_log_alloc_move(eb, dst_slot, src_slot, nr_items);
	if (IS_ERR(tm)) {
		ret = PTR_ERR(tm);
		tm = NULL;
		goto lock;
	}

	/* Pre-allocate removal records for the slots the move overwrites. */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
				BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto lock;
		}
	}

lock:
	if (tree_mod_dont_log(eb->fs_info, eb)) {
		/*
		 * Don't error if we failed to allocate memory because we don't
		 * need to log.
		 */
		ret = 0;
		goto free_tms;
	}
	locked = true;

	/*
	 * We previously failed to allocate memory and we need to log, so we
	 * have to fail.
	 */
	if (ret != 0)
		goto free_tms;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;

	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	if (tm_list) {
		/* Un-insert anything that already made it into the rb tree. */
		for (i = 0; i < nr_items; i++) {
			if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
				rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
			kfree(tm_list[i]);
		}
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
/*
 * Insert all @nritems pre-allocated elements of @tm_list into the log, in
 * descending slot order. On failure, any elements already inserted (those
 * with index > i) are erased again; ownership of the elements stays with
 * the caller, who must free them.
 */
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct tree_mod_elem **tm_list,
				int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			/* Roll back the entries inserted before this one. */
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}
/*
 * Record the replacement of root node @old_root by @new_root. If
 * @log_removal is set and the old root is a node (level > 0), every key
 * pointer in it is additionally logged as a removal-while-freeing, so the
 * old root's content can be rewound later.
 *
 * Returns 0 on success (or when logging is not needed) and a negative errno
 * on failure.
 */
int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root,
				   struct extent_buffer *new_root,
				   bool log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	/* Unlocked fast path: nothing to do if nobody is using the log. */
	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto lock;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto lock;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto lock;
	}

	/* The replace element is keyed by the *new* root's address. */
	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = BTRFS_MOD_LOG_ROOT_REPLACE;

lock:
	if (tree_mod_dont_log(fs_info, NULL)) {
		/*
		 * Don't error if we failed to allocate memory because we don't
		 * need to log.
		 */
		ret = 0;
		goto free_tms;
	} else if (ret != 0) {
		/*
		 * We previously failed to allocate memory and we need to log,
		 * so we have to fail.
		 */
		goto out_unlock;
	}

	if (tm_list)
		ret = tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = tree_mod_log_insert(fs_info, tm);

out_unlock:
	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
/*
 * Search the log for an element with logical address @start and a sequence
 * number >= @min_seq. If @smallest is true the element with the smallest
 * such sequence is returned, otherwise the one with the largest. Returns
 * NULL when no matching element exists. The branch directions mirror those
 * used in tree_mod_log_insert().
 */
static struct tree_mod_elem *__tree_mod_log_search(struct btrfs_fs_info *fs_info,
						   u64 start, u64 min_seq,
						   bool smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			/* Too old to be of interest, look at newer elements. */
			node = node->rb_left;
		} else if (!smallest) {
			/* We want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* We want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			/* Exact match on (start, min_seq), cannot do better. */
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}
/*
 * This returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). Any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info,
							u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, true);
}
/*
 * This returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). Any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *tree_mod_log_search(struct btrfs_fs_info *fs_info,
						 u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, false);
}
/*
 * Record the copy of @nr_items key pointers from @src (starting at
 * @src_offset) into @dst (starting at @dst_offset). Besides a removal per
 * source slot and an addition per destination slot, this also logs the
 * implicit moves: the shift in @dst that makes room for the new items and
 * the shift in @src that closes the gap they leave behind.
 *
 * Returns 0 on success (or when logging is not needed) and a negative errno
 * on failure.
 */
int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
			       const struct extent_buffer *src,
			       unsigned long dst_offset,
			       unsigned long src_offset,
			       int nr_items)
{
	struct btrfs_fs_info *fs_info = dst->fs_info;
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add = NULL;
	struct tree_mod_elem **tm_list_rem = NULL;
	int i;
	bool locked = false;
	struct tree_mod_elem *dst_move_tm = NULL;
	struct tree_mod_elem *src_move_tm = NULL;
	/* Items of @dst after the insertion point, shifted to make room. */
	u32 dst_move_nr_items = btrfs_header_nritems(dst) - dst_offset;
	/* Items of @src after the copied range, shifted to close the gap. */
	u32 src_move_nr_items = btrfs_header_nritems(src) - (src_offset + nr_items);

	/* Unlocked fast path: nothing to do if nobody is using the log. */
	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	/* Leaf contents are never logged. */
	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	/* One removal plus one addition element per copied item. */
	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list) {
		ret = -ENOMEM;
		goto lock;
	}

	if (dst_move_nr_items) {
		dst_move_tm = tree_mod_log_alloc_move(dst, dst_offset + nr_items,
						      dst_offset, dst_move_nr_items);
		if (IS_ERR(dst_move_tm)) {
			ret = PTR_ERR(dst_move_tm);
			dst_move_tm = NULL;
			goto lock;
		}
	}
	if (src_move_nr_items) {
		src_move_tm = tree_mod_log_alloc_move(src, src_offset,
						      src_offset + nr_items,
						      src_move_nr_items);
		if (IS_ERR(src_move_tm)) {
			ret = PTR_ERR(src_move_tm);
			src_move_tm = NULL;
			goto lock;
		}
	}

	/* First half of the array holds additions, second half removals. */
	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
						     BTRFS_MOD_LOG_KEY_REMOVE);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto lock;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
						     BTRFS_MOD_LOG_KEY_ADD);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto lock;
		}
	}

lock:
	if (tree_mod_dont_log(fs_info, NULL)) {
		/*
		 * Don't error if we failed to allocate memory because we don't
		 * need to log.
		 */
		ret = 0;
		goto free_tms;
	}
	locked = true;

	/*
	 * We previously failed to allocate memory and we need to log, so we
	 * have to fail.
	 */
	if (ret != 0)
		goto free_tms;

	if (dst_move_tm) {
		ret = tree_mod_log_insert(fs_info, dst_move_tm);
		if (ret)
			goto free_tms;
	}
	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;

		ret = tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}
	if (src_move_tm) {
		ret = tree_mod_log_insert(fs_info, src_move_tm);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	/* Un-insert anything that already made it into the rb tree. */
	if (dst_move_tm && !RB_EMPTY_NODE(&dst_move_tm->node))
		rb_erase(&dst_move_tm->node, &fs_info->tree_mod_log);
	kfree(dst_move_tm);
	if (src_move_tm && !RB_EMPTY_NODE(&src_move_tm->node))
		rb_erase(&src_move_tm->node, &fs_info->tree_mod_log);
	kfree(src_move_tm);
	if (tm_list) {
		for (i = 0; i < nr_items * 2; i++) {
			if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
				rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
			kfree(tm_list[i]);
		}
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}
/*
 * Record the freeing of node @eb: log a removal-while-freeing element for
 * every key pointer it holds, so its content can be rewound later.
 *
 * Returns 0 on success (or when logging is not needed) and a negative errno
 * on failure.
 */
int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	/* Unlocked fast path: nothing to do if nobody is using the log. */
	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list) {
		ret = -ENOMEM;
		goto lock;
	}

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto lock;
		}
	}

lock:
	if (tree_mod_dont_log(eb->fs_info, eb)) {
		/*
		 * Don't error if we failed to allocate memory because we don't
		 * need to log.
		 */
		ret = 0;
		goto free_tms;
	} else if (ret != 0) {
		/*
		 * We previously failed to allocate memory and we need to log,
		 * so we have to fail.
		 */
		goto out_unlock;
	}

	ret = tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
out_unlock:
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}

	return ret;
}
/*
 * Returns the logical address of the oldest predecessor of the given root.
 * Entries older than time_seq are ignored.
 */
static struct tree_mod_elem *tree_mod_log_oldest_root(struct extent_buffer *eb_root,
						      u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	bool looped = false;

	if (!time_seq)
		return NULL;

	/*
	 * The very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). This has the logical address
	 * of the *new* root, making it the very first operation that's logged
	 * for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		/* Nothing at all was logged for this root. */
		if (!looped && !tm)
			return NULL;
		/*
		 * If there are no tree operation for the oldest root, we simply
		 * return it. This should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;
		/*
		 * If there's an operation that's not a root replacement, we
		 * found the oldest version of our root. Normally, we'll find a
		 * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != BTRFS_MOD_LOG_ROOT_REPLACE)
			break;

		/* Follow the replace chain one step further back in time. */
		found = tm;
		root_logical = tm->old_root.logical;
		looped = true;
	}

	/* If there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. Then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb,
				u64 time_seq,
				struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);
	/*
	 * max_slot tracks the maximum valid slot of the rewind eb at every
	 * step of the rewind. This is in contrast with 'n' which eventually
	 * matches the number of items, but can be wrong during moves or if
	 * removes overlap on already valid slots (which is probably separately
	 * a bug). We do this to validate the offsets of memmoves for rewinding
	 * moves and detect invalid memmoves.
	 *
	 * Since a rewind eb can start empty, max_slot is a signed integer with
	 * a special meaning for -1, which is that no slot is valid to move out
	 * of. Any other negative value is invalid.
	 */
	int max_slot;
	int move_src_end_slot;
	int move_dst_end_slot;

	n = btrfs_header_nritems(eb);
	max_slot = n - 1;
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		ASSERT(max_slot >= -1);
		/*
		 * All the operations are recorded with the operator used for
		 * the modification. As we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			fallthrough;
		case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case BTRFS_MOD_LOG_KEY_REMOVE:
			/* Undo a removal: restore the saved key pointer. */
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			if (tm->slot > max_slot)
				max_slot = tm->slot;
			break;
		case BTRFS_MOD_LOG_KEY_REPLACE:
			/* Undo a replace: restore the old key pointer. */
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case BTRFS_MOD_LOG_KEY_ADD:
			/*
			 * It is possible we could have already removed keys
			 * behind the known max slot, so this will be an
			 * overestimate. In practice, the copy operation
			 * inserts them in increasing order, and overestimating
			 * just means we miss some warnings, so it's OK. It
			 * isn't worth carefully tracking the full array of
			 * valid slots to check against when moving.
			 */
			if (tm->slot == max_slot)
				max_slot--;
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case BTRFS_MOD_LOG_MOVE_KEYS:
			/* Undo a move by moving the items back again. */
			ASSERT(tm->move.nr_items > 0);
			move_src_end_slot = tm->move.dst_slot + tm->move.nr_items - 1;
			move_dst_end_slot = tm->slot + tm->move.nr_items - 1;
			o_dst = btrfs_node_key_ptr_offset(eb, tm->slot);
			o_src = btrfs_node_key_ptr_offset(eb, tm->move.dst_slot);
			if (WARN_ON(move_src_end_slot > max_slot ||
				    tm->move.nr_items <= 0)) {
				btrfs_warn(fs_info,
"move from invalid tree mod log slot eb %llu slot %d dst_slot %d nr_items %d seq %llu n %u max_slot %d",
					   eb->start, tm->slot,
					   tm->move.dst_slot, tm->move.nr_items,
					   tm->seq, n, max_slot);
			}
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			max_slot = move_dst_end_slot;
			break;
		case BTRFS_MOD_LOG_ROOT_REPLACE:
			/*
			 * This operation is special. For roots, this must be
			 * handled explicitly before rewinding.
			 * For non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. In the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. We simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		/* Stop once we leave the run of elements for this block. */
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}
/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
						struct btrfs_path *path,
						struct extent_buffer *eb,
						u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	/* Leaves are never logged, so there is nothing to rewind. */
	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		/*
		 * The block was completely freed; rewind starts from an
		 * empty dummy buffer instead of a clone.
		 */
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	/* Hand back the input buffer before working on the private copy. */
	btrfs_tree_read_unlock(eb);
	free_extent_buffer(eb);

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
				       eb_rewin, btrfs_header_level(eb_rewin));
	btrfs_tree_read_lock(eb_rewin);
	tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}
/*
 * Rewind the state of @root's root node to the given @time_seq value.
 * If there are no changes, the current root->root_node is returned. If anything
 * changed in between, there's a fresh buffer allocated on which the rewind
 * operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	u64 eb_root_owner = 0;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	int level;

	eb_root = btrfs_read_lock_root_node(root);
	/* No logged changes since @time_seq: the current root node is valid. */
	tm = tree_mod_log_oldest_root(eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == BTRFS_MOD_LOG_ROOT_REPLACE) {
		/* The root node itself was replaced; rewind the old one. */
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
		level = old_root->level;
	} else {
		/* Same root node, only its content changed. */
		logical = eb_root->start;
		level = btrfs_header_level(eb_root);
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		/*
		 * The old root block was not fully reconstructible from the
		 * log alone, so read it from disk and clone it as the base
		 * for the rewind.
		 */
		struct btrfs_tree_parent_check check = { 0 };

		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);

		check.level = level;
		check.owner_root = btrfs_root_id(root);

		old = read_tree_block(fs_info, logical, &check);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
			/* eb stays NULL; we return NULL below. */
		} else {
			struct tree_mod_elem *tm2;

			btrfs_tree_read_lock(old);
			eb = btrfs_clone_extent_buffer(old);
			/*
			 * After the lookup for the most recent tree mod operation
			 * above and before we locked and cloned the extent buffer
			 * 'old', a new tree mod log operation may have been added.
			 * So lookup for a more recent one to make sure the number
			 * of mod log operations we replay is consistent with the
			 * number of items we have in the cloned extent buffer,
			 * otherwise we can hit a BUG_ON when rewinding the extent
			 * buffer.
			 */
			tm2 = tree_mod_log_search(fs_info, logical, time_seq);
			btrfs_tree_read_unlock(old);
			free_extent_buffer(old);

			/* Defensive re-check for non-debug builds where ASSERT is a no-op. */
			ASSERT(tm2);
			ASSERT(tm2 == tm || tm2->seq > tm->seq);
			if (!tm2 || tm2->seq < tm->seq) {
				free_extent_buffer(eb);
				return NULL;
			}
			tm = tm2;
		}
	} else if (old_root) {
		/*
		 * Old root block is fully described by the log (it was logged
		 * while being freed): rebuild it from scratch on a dummy buffer.
		 */
		eb_root_owner = btrfs_header_owner(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		/* Root node unchanged; rewind on a private clone of it. */
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;

	if (old_root) {
		/* Restore the replaced root's identity on the fresh buffer. */
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, eb_root_owner);
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	/* Set the lockdep class before locking the buffer we return. */
	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
				       btrfs_header_level(eb));
	btrfs_tree_read_lock(eb);
	if (tm)
		tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}
  950. int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
  951. {
  952. struct tree_mod_elem *tm;
  953. int level;
  954. struct extent_buffer *eb_root = btrfs_root_node(root);
  955. tm = tree_mod_log_oldest_root(eb_root, time_seq);
  956. if (tm && tm->op == BTRFS_MOD_LOG_ROOT_REPLACE)
  957. level = tm->old_root.level;
  958. else
  959. level = btrfs_header_level(eb_root);
  960. free_extent_buffer(eb_root);
  961. return level;
  962. }
  963. /*
  964. * Return the lowest sequence number in the tree modification log.
  965. *
  966. * Return the sequence number of the oldest tree modification log user, which
  967. * corresponds to the lowest sequence number of all existing users. If there are
  968. * no users it returns 0.
  969. */
  970. u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info)
  971. {
  972. u64 ret = 0;
  973. read_lock(&fs_info->tree_mod_log_lock);
  974. if (!list_empty(&fs_info->tree_mod_seq_list)) {
  975. struct btrfs_seq_list *elem;
  976. elem = list_first_entry(&fs_info->tree_mod_seq_list,
  977. struct btrfs_seq_list, list);
  978. ret = elem->seq;
  979. }
  980. read_unlock(&fs_info->tree_mod_log_lock);
  981. return ret;
  982. }