xfs_trans_rmap.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent	*rmap,
	enum xfs_rmap_intent_type type,
	int			whichfork,
	xfs_exntst_t		state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}
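
/*
 * Allocate an RUD (rmap update done) log item for the given RUI and join it
 * to the transaction so that it is logged when the transaction commits.
 */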
struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans	*tp,
	struct xfs_rui_log_item	*ruip)
{
	struct xfs_rud_log_item	*rudp;

	rudp = xfs_rud_init(tp->t_mountp, ruip);
	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans	*tp,
	struct xfs_rud_log_item	*rudp,
	enum xfs_rmap_intent_type type,
	uint64_t		owner,
	int			whichfork,
	xfs_fileoff_t		startoff,
	xfs_fsblock_t		startblock,
	xfs_filblks_t		blockcount,
	xfs_exntst_t		state,
	struct xfs_btree_cur	**pcur)
{
	int			error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}
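
/*
 * Note: callers thread a single rmap btree cursor through successive calls
 * via @pcur, presumably so that one cursor can be reused across a batch of
 * updates; xfs_rmap_finish_one_cleanup() (see the cleanup hook below)
 * releases it when the batch is done.
 */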

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	struct xfs_mount	*mp = priv;
	struct xfs_rmap_intent	*ra;
	struct xfs_rmap_intent	*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}
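
/*
 * Sorting the intents by AG number presumably keeps the deferred work
 * touching AG metadata in ascending AG order, the usual XFS convention for
 * avoiding AG lock inversions between transactions.
 */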

/* Get an RUI. */
STATIC void *
xfs_rmap_update_create_intent(
	struct xfs_trans	*tp,
	unsigned int		count)
{
	struct xfs_rui_log_item	*ruip;

	ASSERT(tp != NULL);
	ASSERT(count > 0);

	ruip = xfs_rui_init(tp->t_mountp, count);
	ASSERT(ruip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &ruip->rui_item);
	return ruip;
}
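
/*
 * @count above is the number of deferred rmap items batched into this
 * intent; the deferred-op framework presumably caps it at .max_items
 * (XFS_RUI_MAX_FAST_EXTENTS) from the op type table below, and
 * xfs_rui_init() sizes the RUI to hold that many extents.
 */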

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans	*tp,
	void			*intent,
	struct list_head	*item)
{
	struct xfs_rui_log_item	*ruip = intent;
	struct xfs_rmap_intent	*rmap;
	uint			next_extent;
	struct xfs_map_extent	*map;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}
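
/*
 * Roughly speaking, the xfs_map_extent filled in above is the record that
 * gets written to the on-disk log as part of the RUI format; log recovery
 * replays any RUI whose extents were never covered by a matching RUD.
 */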

/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans	*tp,
	void			*intent,
	unsigned int		count)
{
	return xfs_trans_get_rud(tp, intent);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans	*tp,
	struct list_head	*item,
	void			*done_item,
	void			**state)
{
	struct xfs_rmap_intent	*rmap;
	int			error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, done_item,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}
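
/*
 * ->finish_item threads the rmap btree cursor through *state, so a cursor
 * opened while finishing one item can be reused for the next item in the
 * batch; xfs_rmap_update_finish_cleanup() below releases it afterwards.
 * The intent structure itself is freed here because the framework is done
 * with it once the update has been finished (or has failed).
 */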

/* Clean up after processing deferred rmaps. */
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	void			*intent)
{
	xfs_rui_release(intent);
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head	*item)
{
	struct xfs_rmap_intent	*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_RMAP,
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_rmap_update_diff_items,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.log_item	= xfs_rmap_update_log_item,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup	= xfs_rmap_update_finish_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};
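
/*
 * Rough lifecycle as driven by the deferred-op framework: .create_intent
 * builds an RUI sized for the batch, .log_item copies each queued intent
 * into it, .create_done attaches the RUD in the follow-up transaction, and
 * .finish_item performs each rmap update; .finish_cleanup then releases the
 * shared btree cursor.  On failure, .abort_intent drops the RUI reference
 * and .cancel_item frees any unprocessed intents.
 */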

/* Register the deferred op type. */
void
xfs_rmap_update_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_rmap_update_defer_type);
}
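
/*
 * This registration is expected to run once during XFS initialization so
 * that xfs_defer_finish() can dispatch XFS_DEFER_OPS_TYPE_RMAP work to the
 * callbacks above.
 */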