  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
  4. * Copyright (c) 2010 David Chinner.
  5. * Copyright (c) 2011 Christoph Hellwig.
  6. * All Rights Reserved.
  7. */
  8. #include "xfs.h"
  9. #include "xfs_fs.h"
  10. #include "xfs_format.h"
  11. #include "xfs_log_format.h"
  12. #include "xfs_shared.h"
  13. #include "xfs_trans_resv.h"
  14. #include "xfs_mount.h"
  15. #include "xfs_alloc.h"
  16. #include "xfs_extent_busy.h"
  17. #include "xfs_trace.h"
  18. #include "xfs_trans.h"
  19. #include "xfs_log.h"
  20. #include "xfs_ag.h"
  21. static void
  22. xfs_extent_busy_insert_list(
  23. struct xfs_perag *pag,
  24. xfs_agblock_t bno,
  25. xfs_extlen_t len,
  26. unsigned int flags,
  27. struct list_head *busy_list)
  28. {
  29. struct xfs_extent_busy *new;
  30. struct xfs_extent_busy *busyp;
  31. struct rb_node **rbp;
  32. struct rb_node *parent = NULL;
  33. new = kzalloc(sizeof(struct xfs_extent_busy),
  34. GFP_KERNEL | __GFP_NOFAIL);
  35. new->agno = pag->pag_agno;
  36. new->bno = bno;
  37. new->length = len;
  38. INIT_LIST_HEAD(&new->list);
  39. new->flags = flags;
  40. /* trace before insert to be able to see failed inserts */
  41. trace_xfs_extent_busy(pag->pag_mount, pag->pag_agno, bno, len);
  42. spin_lock(&pag->pagb_lock);
  43. rbp = &pag->pagb_tree.rb_node;
  44. while (*rbp) {
  45. parent = *rbp;
  46. busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);
  47. if (new->bno < busyp->bno) {
  48. rbp = &(*rbp)->rb_left;
  49. ASSERT(new->bno + new->length <= busyp->bno);
  50. } else if (new->bno > busyp->bno) {
  51. rbp = &(*rbp)->rb_right;
  52. ASSERT(bno >= busyp->bno + busyp->length);
  53. } else {
  54. ASSERT(0);
  55. }
  56. }
  57. rb_link_node(&new->rb_node, parent, rbp);
  58. rb_insert_color(&new->rb_node, &pag->pagb_tree);
  59. /* always process discard lists in fifo order */
  60. list_add_tail(&new->list, busy_list);
  61. spin_unlock(&pag->pagb_lock);
  62. }
  63. void
  64. xfs_extent_busy_insert(
  65. struct xfs_trans *tp,
  66. struct xfs_perag *pag,
  67. xfs_agblock_t bno,
  68. xfs_extlen_t len,
  69. unsigned int flags)
  70. {
  71. xfs_extent_busy_insert_list(pag, bno, len, flags, &tp->t_busy);
  72. }
  73. void
  74. xfs_extent_busy_insert_discard(
  75. struct xfs_perag *pag,
  76. xfs_agblock_t bno,
  77. xfs_extlen_t len,
  78. struct list_head *busy_list)
  79. {
  80. xfs_extent_busy_insert_list(pag, bno, len, XFS_EXTENT_BUSY_DISCARDED,
  81. busy_list);
  82. }
  83. /*
  84. * Search for a busy extent within the range of the extent we are about to
  85. * allocate. You need to be holding the busy extent tree lock when calling
  86. * xfs_extent_busy_search(). This function returns 0 for no overlapping busy
  87. * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
  88. * match. This is done so that a non-zero return indicates an overlap that
  89. * will require a synchronous transaction, but it can still be
  90. * used to distinguish between a partial or exact match.
  91. */
  92. int
  93. xfs_extent_busy_search(
  94. struct xfs_mount *mp,
  95. struct xfs_perag *pag,
  96. xfs_agblock_t bno,
  97. xfs_extlen_t len)
  98. {
  99. struct rb_node *rbp;
  100. struct xfs_extent_busy *busyp;
  101. int match = 0;
  102. /* find closest start bno overlap */
  103. spin_lock(&pag->pagb_lock);
  104. rbp = pag->pagb_tree.rb_node;
  105. while (rbp) {
  106. busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
  107. if (bno < busyp->bno) {
  108. /* may overlap, but exact start block is lower */
  109. if (bno + len > busyp->bno)
  110. match = -1;
  111. rbp = rbp->rb_left;
  112. } else if (bno > busyp->bno) {
  113. /* may overlap, but exact start block is higher */
  114. if (bno < busyp->bno + busyp->length)
  115. match = -1;
  116. rbp = rbp->rb_right;
  117. } else {
  118. /* bno matches busyp, length determines exact match */
  119. match = (busyp->length == len) ? 1 : -1;
  120. break;
  121. }
  122. }
  123. spin_unlock(&pag->pagb_lock);
  124. return match;
  125. }
  126. /*
  127. * The found free extent [fbno, fend] overlaps part or all of the given busy
  128. * extent. If the overlap covers the beginning, the end, or all of the busy
  129. * extent, the overlapping portion can be made unbusy and used for the
  130. * allocation. We can't split a busy extent because we can't modify a
  131. * transaction/CIL context busy list, but we can update an entry's block
  132. * number or length.
  133. *
  134. * Returns true if the extent can safely be reused, or false if the search
  135. * needs to be restarted.
  136. */
  137. STATIC bool
  138. xfs_extent_busy_update_extent(
  139. struct xfs_mount *mp,
  140. struct xfs_perag *pag,
  141. struct xfs_extent_busy *busyp,
  142. xfs_agblock_t fbno,
  143. xfs_extlen_t flen,
  144. bool userdata) __releases(&pag->pagb_lock)
  145. __acquires(&pag->pagb_lock)
  146. {
  147. xfs_agblock_t fend = fbno + flen;
  148. xfs_agblock_t bbno = busyp->bno;
  149. xfs_agblock_t bend = bbno + busyp->length;
  150. /*
  151. * This extent is currently being discarded. Give the thread
  152. * performing the discard a chance to mark the extent unbusy
  153. * and retry.
  154. */
  155. if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
  156. spin_unlock(&pag->pagb_lock);
  157. delay(1);
  158. spin_lock(&pag->pagb_lock);
  159. return false;
  160. }
  161. /*
  162. * If there is a busy extent overlapping a user allocation, we have
  163. * no choice but to force the log and retry the search.
  164. *
  165. * Fortunately this does not happen during normal operation, but
  166. * only if the filesystem is very low on space and has to dip into
  167. * the AGFL for normal allocations.
  168. */
  169. if (userdata)
  170. goto out_force_log;
  171. if (bbno < fbno && bend > fend) {
  172. /*
  173. * Case 1:
  174. * bbno bend
  175. * +BBBBBBBBBBBBBBBBB+
  176. * +---------+
  177. * fbno fend
  178. */
  179. /*
  180. * We would have to split the busy extent to be able to track
  181. * it correct, which we cannot do because we would have to
  182. * modify the list of busy extents attached to the transaction
  183. * or CIL context, which is immutable.
  184. *
  185. * Force out the log to clear the busy extent and retry the
  186. * search.
  187. */
  188. goto out_force_log;
  189. } else if (bbno >= fbno && bend <= fend) {
  190. /*
  191. * Case 2:
  192. * bbno bend
  193. * +BBBBBBBBBBBBBBBBB+
  194. * +-----------------+
  195. * fbno fend
  196. *
  197. * Case 3:
  198. * bbno bend
  199. * +BBBBBBBBBBBBBBBBB+
  200. * +--------------------------+
  201. * fbno fend
  202. *
  203. * Case 4:
  204. * bbno bend
  205. * +BBBBBBBBBBBBBBBBB+
  206. * +--------------------------+
  207. * fbno fend
  208. *
  209. * Case 5:
  210. * bbno bend
  211. * +BBBBBBBBBBBBBBBBB+
  212. * +-----------------------------------+
  213. * fbno fend
  214. *
  215. */
  216. /*
  217. * The busy extent is fully covered by the extent we are
  218. * allocating, and can simply be removed from the rbtree.
  219. * However we cannot remove it from the immutable list
  220. * tracking busy extents in the transaction or CIL context,
  221. * so set the length to zero to mark it invalid.
  222. *
  223. * We also need to restart the busy extent search from the
  224. * tree root, because erasing the node can rearrange the
  225. * tree topology.
  226. */
  227. rb_erase(&busyp->rb_node, &pag->pagb_tree);
  228. busyp->length = 0;
  229. return false;
  230. } else if (fend < bend) {
  231. /*
  232. * Case 6:
  233. * bbno bend
  234. * +BBBBBBBBBBBBBBBBB+
  235. * +---------+
  236. * fbno fend
  237. *
  238. * Case 7:
  239. * bbno bend
  240. * +BBBBBBBBBBBBBBBBB+
  241. * +------------------+
  242. * fbno fend
  243. *
  244. */
  245. busyp->bno = fend;
  246. busyp->length = bend - fend;
  247. } else if (bbno < fbno) {
  248. /*
  249. * Case 8:
  250. * bbno bend
  251. * +BBBBBBBBBBBBBBBBB+
  252. * +-------------+
  253. * fbno fend
  254. *
  255. * Case 9:
  256. * bbno bend
  257. * +BBBBBBBBBBBBBBBBB+
  258. * +----------------------+
  259. * fbno fend
  260. */
  261. busyp->length = fbno - busyp->bno;
  262. } else {
  263. ASSERT(0);
  264. }
  265. trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
  266. return true;
  267. out_force_log:
  268. spin_unlock(&pag->pagb_lock);
  269. xfs_log_force(mp, XFS_LOG_SYNC);
  270. trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
  271. spin_lock(&pag->pagb_lock);
  272. return false;
  273. }
  274. /*
  275. * For a given extent [fbno, flen], make sure we can reuse it safely.
  276. */
  277. void
  278. xfs_extent_busy_reuse(
  279. struct xfs_mount *mp,
  280. struct xfs_perag *pag,
  281. xfs_agblock_t fbno,
  282. xfs_extlen_t flen,
  283. bool userdata)
  284. {
  285. struct rb_node *rbp;
  286. ASSERT(flen > 0);
  287. spin_lock(&pag->pagb_lock);
  288. restart:
  289. rbp = pag->pagb_tree.rb_node;
  290. while (rbp) {
  291. struct xfs_extent_busy *busyp =
  292. rb_entry(rbp, struct xfs_extent_busy, rb_node);
  293. xfs_agblock_t bbno = busyp->bno;
  294. xfs_agblock_t bend = bbno + busyp->length;
  295. if (fbno + flen <= bbno) {
  296. rbp = rbp->rb_left;
  297. continue;
  298. } else if (fbno >= bend) {
  299. rbp = rbp->rb_right;
  300. continue;
  301. }
  302. if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
  303. userdata))
  304. goto restart;
  305. }
  306. spin_unlock(&pag->pagb_lock);
  307. }
  308. /*
  309. * For a given extent [fbno, flen], search the busy extent list to find a
  310. * subset of the extent that is not busy. If *rlen is smaller than
  311. * args->minlen no suitable extent could be found, and the higher level
  312. * code needs to force out the log and retry the allocation.
  313. *
  314. * Return the current busy generation for the AG if the extent is busy. This
  315. * value can be used to wait for at least one of the currently busy extents
  316. * to be cleared. Note that the busy list is not guaranteed to be empty after
  317. * the gen is woken. The state of a specific extent must always be confirmed
  318. * with another call to xfs_extent_busy_trim() before it can be used.
  319. */
  320. bool
  321. xfs_extent_busy_trim(
  322. struct xfs_alloc_arg *args,
  323. xfs_agblock_t *bno,
  324. xfs_extlen_t *len,
  325. unsigned *busy_gen)
  326. {
  327. xfs_agblock_t fbno;
  328. xfs_extlen_t flen;
  329. struct rb_node *rbp;
  330. bool ret = false;
  331. ASSERT(*len > 0);
  332. spin_lock(&args->pag->pagb_lock);
  333. fbno = *bno;
  334. flen = *len;
  335. rbp = args->pag->pagb_tree.rb_node;
  336. while (rbp && flen >= args->minlen) {
  337. struct xfs_extent_busy *busyp =
  338. rb_entry(rbp, struct xfs_extent_busy, rb_node);
  339. xfs_agblock_t fend = fbno + flen;
  340. xfs_agblock_t bbno = busyp->bno;
  341. xfs_agblock_t bend = bbno + busyp->length;
  342. if (fend <= bbno) {
  343. rbp = rbp->rb_left;
  344. continue;
  345. } else if (fbno >= bend) {
  346. rbp = rbp->rb_right;
  347. continue;
  348. }
  349. if (bbno <= fbno) {
  350. /* start overlap */
  351. /*
  352. * Case 1:
  353. * bbno bend
  354. * +BBBBBBBBBBBBBBBBB+
  355. * +---------+
  356. * fbno fend
  357. *
  358. * Case 2:
  359. * bbno bend
  360. * +BBBBBBBBBBBBBBBBB+
  361. * +-------------+
  362. * fbno fend
  363. *
  364. * Case 3:
  365. * bbno bend
  366. * +BBBBBBBBBBBBBBBBB+
  367. * +-------------+
  368. * fbno fend
  369. *
  370. * Case 4:
  371. * bbno bend
  372. * +BBBBBBBBBBBBBBBBB+
  373. * +-----------------+
  374. * fbno fend
  375. *
  376. * No unbusy region in extent, return failure.
  377. */
  378. if (fend <= bend)
  379. goto fail;
  380. /*
  381. * Case 5:
  382. * bbno bend
  383. * +BBBBBBBBBBBBBBBBB+
  384. * +----------------------+
  385. * fbno fend
  386. *
  387. * Case 6:
  388. * bbno bend
  389. * +BBBBBBBBBBBBBBBBB+
  390. * +--------------------------+
  391. * fbno fend
  392. *
  393. * Needs to be trimmed to:
  394. * +-------+
  395. * fbno fend
  396. */
  397. fbno = bend;
  398. } else if (bend >= fend) {
  399. /* end overlap */
  400. /*
  401. * Case 7:
  402. * bbno bend
  403. * +BBBBBBBBBBBBBBBBB+
  404. * +------------------+
  405. * fbno fend
  406. *
  407. * Case 8:
  408. * bbno bend
  409. * +BBBBBBBBBBBBBBBBB+
  410. * +--------------------------+
  411. * fbno fend
  412. *
  413. * Needs to be trimmed to:
  414. * +-------+
  415. * fbno fend
  416. */
  417. fend = bbno;
  418. } else {
  419. /* middle overlap */
  420. /*
  421. * Case 9:
  422. * bbno bend
  423. * +BBBBBBBBBBBBBBBBB+
  424. * +-----------------------------------+
  425. * fbno fend
  426. *
  427. * Can be trimmed to:
  428. * +-------+ OR +-------+
  429. * fbno fend fbno fend
  430. *
  431. * Backward allocation leads to significant
  432. * fragmentation of directories, which degrades
  433. * directory performance, therefore we always want to
  434. * choose the option that produces forward allocation
  435. * patterns.
  436. * Preferring the lower bno extent will make the next
  437. * request use "fend" as the start of the next
  438. * allocation; if the segment is no longer busy at
  439. * that point, we'll get a contiguous allocation, but
  440. * even if it is still busy, we will get a forward
  441. * allocation.
  442. * We try to avoid choosing the segment at "bend",
  443. * because that can lead to the next allocation
  444. * taking the segment at "fbno", which would be a
  445. * backward allocation. We only use the segment at
  446. * "fbno" if it is much larger than the current
  447. * requested size, because in that case there's a
  448. * good chance subsequent allocations will be
  449. * contiguous.
  450. */
  451. if (bbno - fbno >= args->maxlen) {
  452. /* left candidate fits perfect */
  453. fend = bbno;
  454. } else if (fend - bend >= args->maxlen * 4) {
  455. /* right candidate has enough free space */
  456. fbno = bend;
  457. } else if (bbno - fbno >= args->minlen) {
  458. /* left candidate fits minimum requirement */
  459. fend = bbno;
  460. } else {
  461. goto fail;
  462. }
  463. }
  464. flen = fend - fbno;
  465. }
  466. out:
  467. if (fbno != *bno || flen != *len) {
  468. trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
  469. fbno, flen);
  470. *bno = fbno;
  471. *len = flen;
  472. *busy_gen = args->pag->pagb_gen;
  473. ret = true;
  474. }
  475. spin_unlock(&args->pag->pagb_lock);
  476. return ret;
  477. fail:
  478. /*
  479. * Return a zero extent length as failure indications. All callers
  480. * re-check if the trimmed extent satisfies the minlen requirement.
  481. */
  482. flen = 0;
  483. goto out;
  484. }
  485. static bool
  486. xfs_extent_busy_clear_one(
  487. struct xfs_perag *pag,
  488. struct xfs_extent_busy *busyp,
  489. bool do_discard)
  490. {
  491. if (busyp->length) {
  492. if (do_discard &&
  493. !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
  494. busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
  495. return false;
  496. }
  497. trace_xfs_extent_busy_clear(pag->pag_mount, busyp->agno,
  498. busyp->bno, busyp->length);
  499. rb_erase(&busyp->rb_node, &pag->pagb_tree);
  500. }
  501. list_del_init(&busyp->list);
  502. kfree(busyp);
  503. return true;
  504. }
  505. /*
  506. * Remove all extents on the passed in list from the busy extents tree.
  507. * If do_discard is set skip extents that need to be discarded, and mark
  508. * these as undergoing a discard operation instead.
  509. */
  510. void
  511. xfs_extent_busy_clear(
  512. struct xfs_mount *mp,
  513. struct list_head *list,
  514. bool do_discard)
  515. {
  516. struct xfs_extent_busy *busyp, *next;
  517. busyp = list_first_entry_or_null(list, typeof(*busyp), list);
  518. if (!busyp)
  519. return;
  520. do {
  521. bool wakeup = false;
  522. struct xfs_perag *pag;
  523. pag = xfs_perag_get(mp, busyp->agno);
  524. spin_lock(&pag->pagb_lock);
  525. do {
  526. next = list_next_entry(busyp, list);
  527. if (xfs_extent_busy_clear_one(pag, busyp, do_discard))
  528. wakeup = true;
  529. busyp = next;
  530. } while (!list_entry_is_head(busyp, list, list) &&
  531. busyp->agno == pag->pag_agno);
  532. if (wakeup) {
  533. pag->pagb_gen++;
  534. wake_up_all(&pag->pagb_wait);
  535. }
  536. spin_unlock(&pag->pagb_lock);
  537. xfs_perag_put(pag);
  538. } while (!list_entry_is_head(busyp, list, list));
  539. }
  540. /*
  541. * Flush out all busy extents for this AG.
  542. *
  543. * If the current transaction is holding busy extents, the caller may not want
  544. * to wait for committed busy extents to resolve. If we are being told just to
  545. * try a flush or progress has been made since we last skipped a busy extent,
  546. * return immediately to allow the caller to try again.
  547. *
  548. * If we are freeing extents, we might actually be holding the only free extents
  549. * in the transaction busy list and the log force won't resolve that situation.
  550. * In this case, we must return -EAGAIN to avoid a deadlock by informing the
  551. * caller it needs to commit the busy extents it holds before retrying the
  552. * extent free operation.
  553. */
  554. int
  555. xfs_extent_busy_flush(
  556. struct xfs_trans *tp,
  557. struct xfs_perag *pag,
  558. unsigned busy_gen,
  559. uint32_t alloc_flags)
  560. {
  561. DEFINE_WAIT (wait);
  562. int error;
  563. error = xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
  564. if (error)
  565. return error;
  566. /* Avoid deadlocks on uncommitted busy extents. */
  567. if (!list_empty(&tp->t_busy)) {
  568. if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH)
  569. return 0;
  570. if (busy_gen != READ_ONCE(pag->pagb_gen))
  571. return 0;
  572. if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
  573. return -EAGAIN;
  574. }
  575. /* Wait for committed busy extents to resolve. */
  576. do {
  577. prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
  578. if (busy_gen != READ_ONCE(pag->pagb_gen))
  579. break;
  580. schedule();
  581. } while (1);
  582. finish_wait(&pag->pagb_wait, &wait);
  583. return 0;
  584. }
  585. void
  586. xfs_extent_busy_wait_all(
  587. struct xfs_mount *mp)
  588. {
  589. struct xfs_perag *pag;
  590. DEFINE_WAIT (wait);
  591. xfs_agnumber_t agno;
  592. for_each_perag(mp, agno, pag) {
  593. do {
  594. prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
  595. if (RB_EMPTY_ROOT(&pag->pagb_tree))
  596. break;
  597. schedule();
  598. } while (1);
  599. finish_wait(&pag->pagb_wait, &wait);
  600. }
  601. }
  602. /*
  603. * Callback for list_sort to sort busy extents by the AG they reside in.
  604. */
  605. int
  606. xfs_extent_busy_ag_cmp(
  607. void *priv,
  608. const struct list_head *l1,
  609. const struct list_head *l2)
  610. {
  611. struct xfs_extent_busy *b1 =
  612. container_of(l1, struct xfs_extent_busy, list);
  613. struct xfs_extent_busy *b2 =
  614. container_of(l2, struct xfs_extent_busy, list);
  615. s32 diff;
  616. diff = b1->agno - b2->agno;
  617. if (!diff)
  618. diff = b1->bno - b2->bno;
  619. return diff;
  620. }
  621. /* Are there any busy extents in this AG? */
  622. bool
  623. xfs_extent_busy_list_empty(
  624. struct xfs_perag *pag)
  625. {
  626. bool res;
  627. spin_lock(&pag->pagb_lock);
  628. res = RB_EMPTY_ROOT(&pag->pagb_tree);
  629. spin_unlock(&pag->pagb_lock);
  630. return res;
  631. }