/* fs/xfs/xfs_drain.h */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef XFS_DRAIN_H_
#define XFS_DRAIN_H_

/* Forward declaration: this header only uses pointers to perag structures. */
struct xfs_perag;

#ifdef CONFIG_XFS_DRAIN_INTENTS
/*
 * Passive drain mechanism.  This data structure tracks a count of some items
 * and contains a waitqueue for callers who would like to wake up when the
 * count hits zero.
 */
struct xfs_defer_drain {
	/* Number of items pending in some part of the filesystem. */
	atomic_t		dr_count;

	/* Queue to wait for dr_count to go to zero. */
	struct wait_queue_head	dr_waiters;
};
/* Set up / tear down one drain counter and its waitqueue. */
void xfs_defer_drain_init(struct xfs_defer_drain *dr);
void xfs_defer_drain_free(struct xfs_defer_drain *dr);

/*
 * Temporarily disable or re-enable waiting on drains.
 * NOTE(review): the actual semantics live in xfs_drain.c — confirm there.
 */
void xfs_drain_wait_disable(void);
void xfs_drain_wait_enable(void);
  25. /*
  26. * Deferred Work Intent Drains
  27. * ===========================
  28. *
  29. * When a writer thread executes a chain of log intent items, the AG header
  30. * buffer locks will cycle during a transaction roll to get from one intent
  31. * item to the next in a chain. Although scrub takes all AG header buffer
  32. * locks, this isn't sufficient to guard against scrub checking an AG while
  33. * that writer thread is in the middle of finishing a chain because there's no
  34. * higher level locking primitive guarding allocation groups.
  35. *
  36. * When there's a collision, cross-referencing between data structures (e.g.
  37. * rmapbt and refcountbt) yields false corruption events; if repair is running,
  38. * this results in incorrect repairs, which is catastrophic.
  39. *
 * The solution is to add to the perag structure the count of active intents and make
  41. * scrub wait until it has both AG header buffer locks and the intent counter
  42. * reaches zero. It is therefore critical that deferred work threads hold the
  43. * AGI or AGF buffers when decrementing the intent counter.
  44. *
  45. * Given a list of deferred work items, the deferred work manager will complete
  46. * a work item and all the sub-items that the parent item creates before moving
  47. * on to the next work item in the list. This is also true for all levels of
  48. * sub-items. Writer threads are permitted to queue multiple work items
 * targeting the same AG, so a deferred work item (such as a BUI) that creates
  50. * sub-items (such as RUIs) must bump the intent counter and maintain it until
  51. * the sub-items can themselves bump the intent counter.
  52. *
  53. * Therefore, the intent count tracks entire lifetimes of deferred work items.
  54. * All functions that create work items must increment the intent counter as
  55. * soon as the item is added to the transaction and cannot drop the counter
  56. * until the item is finished or cancelled.
  57. */
/*
 * Grab a passive reference to the AG containing @fsbno and bump its intent
 * counter; the matching _put drops both.  Per the comment above, the counter
 * must stay elevated for the entire lifetime of the deferred work item.
 */
struct xfs_perag *xfs_perag_intent_get(struct xfs_mount *mp,
		xfs_fsblock_t fsbno);
void xfs_perag_intent_put(struct xfs_perag *pag);

/* Raise/lower the intent counter on a perag we already hold. */
void xfs_perag_intent_hold(struct xfs_perag *pag);
void xfs_perag_intent_rele(struct xfs_perag *pag);

/*
 * Wait for this AG's intent counter to reach zero.  Returns an int,
 * presumably 0 or a negative errno (e.g. if interrupted) — confirm in
 * xfs_drain.c.
 */
int xfs_perag_intent_drain(struct xfs_perag *pag);

/* Are any intents currently pending in this AG? */
bool xfs_perag_intent_busy(struct xfs_perag *pag);
#else
/* Drain tracking is compiled out entirely: the struct is empty... */
struct xfs_defer_drain { /* empty */ };

/* ...and init/free become no-ops that still swallow their argument. */
#define xfs_defer_drain_free(dr)	((void)0)
#define xfs_defer_drain_init(dr)	((void)0)

/*
 * Without intent drains, intent get/put degenerate to plain perag get/put.
 * NOTE(review): the macro evaluates @mp (and @fsbno) more than once — callers
 * must not pass expressions with side effects.
 */
#define xfs_perag_intent_get(mp, fsbno) \
	xfs_perag_get((mp), XFS_FSB_TO_AGNO(mp, fsbno))
#define xfs_perag_intent_put(pag)	xfs_perag_put(pag)

/* hold/rele have nothing to count; drain/busy have no stubs because they
 * are only called when CONFIG_XFS_DRAIN_INTENTS is enabled. */
static inline void xfs_perag_intent_hold(struct xfs_perag *pag) { }
static inline void xfs_perag_intent_rele(struct xfs_perag *pag) { }

#endif /* CONFIG_XFS_DRAIN_INTENTS */

#endif /* XFS_DRAIN_H_ */