  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2016 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <darrick.wong@oracle.com>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_format.h"
  9. #include "xfs_log_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_bit.h"
  12. #include "xfs_shared.h"
  13. #include "xfs_mount.h"
  14. #include "xfs_defer.h"
  15. #include "xfs_trans.h"
  16. #include "xfs_trans_priv.h"
  17. #include "xfs_rmap_item.h"
  18. #include "xfs_log.h"
  19. #include "xfs_rmap.h"
  20. #include "xfs_error.h"
  21. #include "xfs_log_priv.h"
  22. #include "xfs_log_recover.h"
  23. #include "xfs_ag.h"
  24. #include "xfs_btree.h"
  25. #include "xfs_trace.h"
/* Slab caches for the RUI (intent) and RUD (done) log items. */
struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

static const struct xfs_item_ops xfs_rui_item_ops;

/* Convert a generic log item back to its containing RUI. */
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}
/*
 * Free an RUI and its log vector shadow buffer.  RUIs with more than
 * XFS_RUI_MAX_FAST_EXTENTS extents were allocated with kzalloc (see
 * xfs_rui_init), so they must be freed with kfree rather than returned
 * to the slab cache.
 */
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kvfree(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kfree(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}
/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (!atomic_dec_and_test(&ruip->rui_refcount))
		return;

	/* Last reference: pull the item off the AIL (if present) and free. */
	xfs_trans_ail_delete(&ruip->rui_item, 0);
	xfs_rui_item_free(ruip);
}
  60. STATIC void
  61. xfs_rui_item_size(
  62. struct xfs_log_item *lip,
  63. int *nvecs,
  64. int *nbytes)
  65. {
  66. struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
  67. *nvecs += 1;
  68. *nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
  69. }
/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Every extent slot must have been populated before formatting. */
	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}
/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}
/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}
/*
 * Allocate and initialize an rui item with the given number of extents.
 * Small items come from the slab cache; larger ones are kzalloc'd (and
 * must therefore be kfree'd — see xfs_rui_item_free).  Allocation cannot
 * fail because of __GFP_NOFAIL.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
				GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	/* The item's own address doubles as the unique intent id. */
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	/* One reference for the log, one for the matching RUD. */
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}
/* Convert a generic log item back to its containing RUD. */
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}
/*
 * Report the number of log iovecs and log bytes needed to log an RUD.
 * An RUD is a fixed-size done marker: one iovec, one format struct.
 */
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}
/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * It is at this point that we assert that all of the extent
 * slots in the rud item have been filled.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}
/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	/* Drop the RUD's reference on the intent item, then free the RUD. */
	xfs_rui_release(rudp->rud_ruip);
	kvfree(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}
/* Return the RUI intent item that this RUD completes. */
static struct xfs_log_item *
xfs_rud_item_intent(
	struct xfs_log_item	*lip)
{
	return &RUD_ITEM(lip)->rud_ruip->rui_item;
}
/* Log item operations for the RUD (done) item. */
static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
	.iop_intent	= xfs_rud_item_intent,
};
/* Convert a deferred-work list head back to its rmap intent. */
static inline struct xfs_rmap_intent *ri_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_rmap_intent, ri_list);
}
  205. /* Sort rmap intents by AG. */
  206. static int
  207. xfs_rmap_update_diff_items(
  208. void *priv,
  209. const struct list_head *a,
  210. const struct list_head *b)
  211. {
  212. struct xfs_rmap_intent *ra = ri_entry(a);
  213. struct xfs_rmap_intent *rb = ri_entry(b);
  214. return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
  215. }
/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*ri)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);

	/* Encode the in-core intent into the on-disk map extent record. */
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = ri->ri_owner;
	map->me_startblock = ri->ri_bmap.br_startblock;
	map->me_startoff = ri->ri_bmap.br_startoff;
	map->me_len = ri->ri_bmap.br_blockcount;

	map->me_flags = 0;
	if (ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN)
		map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (ri->ri_whichfork == XFS_ATTR_FORK)
		map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;

	/* Translate the intent type into the on-disk extent type flag. */
	switch (ri->ri_type) {
	case XFS_RMAP_MAP:
		map->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		map->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		map->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}
/*
 * Create an RUI intent item covering all the deferred rmap updates on
 * @items, optionally sorting them by AG first, and log each update into
 * the intent.
 */
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*ri;

	ASSERT(count > 0);

	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, ri);
	return &ruip->rui_item;
}
/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(intent);
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			&xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	/* Tie the done item to its intent via the intent id. */
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	return &rudp->rud_item;
}
/* Add this deferred RUI to the transaction. */
void
xfs_rmap_defer_add(
	struct xfs_trans	*tp,
	struct xfs_rmap_intent	*ri)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_rmap_defer(mp, ri);

	/* Pin the per-AG structure; released in cancel_item. */
	ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_bmap.br_startblock);
	xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
}
/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*ri = ri_entry(item);

	/* Drop the per-AG reference taken in xfs_rmap_defer_add. */
	xfs_perag_intent_put(ri->ri_pag);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
}
/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*ri = ri_entry(item);
	int				error;

	error = xfs_rmap_finish_one(tp, ri, state);

	/* The intent is consumed regardless of error; always free it. */
	xfs_rmap_update_cancel_item(item);
	return error;
}
/* Clean up after calling xfs_rmap_finish_one. */
STATIC void
xfs_rmap_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp = NULL;

	if (rcur == NULL)
		return;
	/* Grab the AG buffer before the cursor is torn down. */
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error && agbp)
		xfs_trans_brelse(tp, agbp);
}
/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}
  360. /* Is this recovered RUI ok? */
  361. static inline bool
  362. xfs_rui_validate_map(
  363. struct xfs_mount *mp,
  364. struct xfs_map_extent *map)
  365. {
  366. if (!xfs_has_rmapbt(mp))
  367. return false;
  368. if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
  369. return false;
  370. switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
  371. case XFS_RMAP_EXTENT_MAP:
  372. case XFS_RMAP_EXTENT_MAP_SHARED:
  373. case XFS_RMAP_EXTENT_UNMAP:
  374. case XFS_RMAP_EXTENT_UNMAP_SHARED:
  375. case XFS_RMAP_EXTENT_CONVERT:
  376. case XFS_RMAP_EXTENT_CONVERT_SHARED:
  377. case XFS_RMAP_EXTENT_ALLOC:
  378. case XFS_RMAP_EXTENT_FREE:
  379. break;
  380. default:
  381. return false;
  382. }
  383. if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
  384. !xfs_verify_ino(mp, map->me_owner))
  385. return false;
  386. if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len))
  387. return false;
  388. return xfs_verify_fsbext(mp, map->me_startblock, map->me_len);
  389. }
/*
 * Reconstruct an in-core rmap intent from a recovered on-disk map extent
 * record and queue it as deferred work.
 */
static inline void
xfs_rui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	const struct xfs_map_extent	*map)
{
	struct xfs_rmap_intent		*ri;

	ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);

	/* Decode the on-disk extent type flag back to the intent type. */
	switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
		ri->ri_type = XFS_RMAP_MAP;
		break;
	case XFS_RMAP_EXTENT_MAP_SHARED:
		ri->ri_type = XFS_RMAP_MAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_UNMAP:
		ri->ri_type = XFS_RMAP_UNMAP;
		break;
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
		ri->ri_type = XFS_RMAP_UNMAP_SHARED;
		break;
	case XFS_RMAP_EXTENT_CONVERT:
		ri->ri_type = XFS_RMAP_CONVERT;
		break;
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
		ri->ri_type = XFS_RMAP_CONVERT_SHARED;
		break;
	case XFS_RMAP_EXTENT_ALLOC:
		ri->ri_type = XFS_RMAP_ALLOC;
		break;
	case XFS_RMAP_EXTENT_FREE:
		ri->ri_type = XFS_RMAP_FREE;
		break;
	default:
		/*
		 * Unreachable in practice: the caller has already run
		 * xfs_rui_validate_map on this record.  NOTE(review): this
		 * path would leak ri if it were ever reached.
		 */
		ASSERT(0);
		return;
	}

	ri->ri_owner = map->me_owner;
	ri->ri_whichfork = (map->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	ri->ri_bmap.br_startblock = map->me_startblock;
	ri->ri_bmap.br_startoff = map->me_startoff;
	ri->ri_bmap.br_blockcount = map->me_len;
	ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	ri->ri_pag = xfs_perag_intent_get(mp, map->me_startblock);

	xfs_defer_add_item(dfp, &ri->ri_list);
}
/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rmap_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI. If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}
		xfs_rui_recover_work(mp, dfp, &ruip->rui_format.rui_extents[i]);
	}

	/* Allocate a transaction to finish the recovered intents in. */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&ruip->rui_format,
				sizeof(ruip->rui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}
/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rmap_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*map;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	map = RUI_ITEM(intent)->rui_format.rui_extents;

	/* Build a fresh RUI carrying the same extent records. */
	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
	/* All slots are already filled, so mark the item fully populated. */
	atomic_set(&ruip->rui_next_extent, count);

	return &ruip->rui_item;
}
/* Deferred-operation type glue for rmap updates. */
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.name		= "rmap",
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
	.recover_work	= xfs_rmap_recover_work,
	.relog_intent	= xfs_rmap_relog_intent,
};
/* Does this RUI carry the given intent id?  Used by log recovery. */
STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}
/* Log item operations for the RUI (intent) item. */
static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_match	= xfs_rui_item_match,
};
  530. static inline void
  531. xfs_rui_copy_format(
  532. struct xfs_rui_log_format *dst,
  533. const struct xfs_rui_log_format *src)
  534. {
  535. unsigned int i;
  536. memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents));
  537. for (i = 0; i < src->rui_nextents; i++)
  538. memcpy(&dst->rui_extents[i], &src->rui_extents[i],
  539. sizeof(struct xfs_map_extent));
  540. }
/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;
	size_t				len;

	rui_formatp = item->ri_buf[0].i_addr;

	/*
	 * Make sure the buffer is at least big enough to read rui_nextents
	 * before we trust that field to compute the full expected length.
	 */
	if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
	/* Recovered items arrive fully populated. */
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);

	xlog_recover_intent_item(log, &ruip->rui_item, lsn,
			&xfs_rmap_update_defer_type);
	return 0;
}
/* Recovery hooks for RUI log items. */
const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};
/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	/* RUDs are fixed-size; anything else is corruption. */
	if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				rud_formatp, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}
/* Recovery hooks for RUD log items. */
const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};