/* fs/xfs/xfs_refcount_item.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2016 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <darrick.wong@oracle.com>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_format.h"
  9. #include "xfs_log_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_bit.h"
  12. #include "xfs_shared.h"
  13. #include "xfs_mount.h"
  14. #include "xfs_defer.h"
  15. #include "xfs_trans.h"
  16. #include "xfs_trans_priv.h"
  17. #include "xfs_refcount_item.h"
  18. #include "xfs_log.h"
  19. #include "xfs_refcount.h"
  20. #include "xfs_error.h"
  21. #include "xfs_log_priv.h"
  22. #include "xfs_log_recover.h"
  23. #include "xfs_ag.h"
  24. #include "xfs_btree.h"
  25. #include "xfs_trace.h"
  26. struct kmem_cache *xfs_cui_cache;
  27. struct kmem_cache *xfs_cud_cache;
  28. static const struct xfs_item_ops xfs_cui_item_ops;
  29. static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
  30. {
  31. return container_of(lip, struct xfs_cui_log_item, cui_item);
  32. }
  33. STATIC void
  34. xfs_cui_item_free(
  35. struct xfs_cui_log_item *cuip)
  36. {
  37. kvfree(cuip->cui_item.li_lv_shadow);
  38. if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
  39. kfree(cuip);
  40. else
  41. kmem_cache_free(xfs_cui_cache, cuip);
  42. }
  43. /*
  44. * Freeing the CUI requires that we remove it from the AIL if it has already
  45. * been placed there. However, the CUI may not yet have been placed in the AIL
  46. * when called by xfs_cui_release() from CUD processing due to the ordering of
  47. * committed vs unpin operations in bulk insert operations. Hence the reference
  48. * count to ensure only the last caller frees the CUI.
  49. */
  50. STATIC void
  51. xfs_cui_release(
  52. struct xfs_cui_log_item *cuip)
  53. {
  54. ASSERT(atomic_read(&cuip->cui_refcount) > 0);
  55. if (!atomic_dec_and_test(&cuip->cui_refcount))
  56. return;
  57. xfs_trans_ail_delete(&cuip->cui_item, 0);
  58. xfs_cui_item_free(cuip);
  59. }
  60. STATIC void
  61. xfs_cui_item_size(
  62. struct xfs_log_item *lip,
  63. int *nvecs,
  64. int *nbytes)
  65. {
  66. struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
  67. *nvecs += 1;
  68. *nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
  69. }
  70. /*
  71. * This is called to fill in the vector of log iovecs for the
  72. * given cui log item. We use only 1 iovec, and we point that
  73. * at the cui_log_format structure embedded in the cui item.
  74. * It is at this point that we assert that all of the extent
  75. * slots in the cui item have been filled.
  76. */
  77. STATIC void
  78. xfs_cui_item_format(
  79. struct xfs_log_item *lip,
  80. struct xfs_log_vec *lv)
  81. {
  82. struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
  83. struct xfs_log_iovec *vecp = NULL;
  84. ASSERT(atomic_read(&cuip->cui_next_extent) ==
  85. cuip->cui_format.cui_nextents);
  86. cuip->cui_format.cui_type = XFS_LI_CUI;
  87. cuip->cui_format.cui_size = 1;
  88. xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
  89. xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
  90. }
  91. /*
  92. * The unpin operation is the last place an CUI is manipulated in the log. It is
  93. * either inserted in the AIL or aborted in the event of a log I/O error. In
  94. * either case, the CUI transaction has been successfully committed to make it
  95. * this far. Therefore, we expect whoever committed the CUI to either construct
  96. * and commit the CUD or drop the CUD's reference in the event of error. Simply
  97. * drop the log's CUI reference now that the log is done with it.
  98. */
  99. STATIC void
  100. xfs_cui_item_unpin(
  101. struct xfs_log_item *lip,
  102. int remove)
  103. {
  104. struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
  105. xfs_cui_release(cuip);
  106. }
  107. /*
  108. * The CUI has been either committed or aborted if the transaction has been
  109. * cancelled. If the transaction was cancelled, an CUD isn't going to be
  110. * constructed and thus we free the CUI here directly.
  111. */
  112. STATIC void
  113. xfs_cui_item_release(
  114. struct xfs_log_item *lip)
  115. {
  116. xfs_cui_release(CUI_ITEM(lip));
  117. }
  118. /*
  119. * Allocate and initialize an cui item with the given number of extents.
  120. */
  121. STATIC struct xfs_cui_log_item *
  122. xfs_cui_init(
  123. struct xfs_mount *mp,
  124. uint nextents)
  125. {
  126. struct xfs_cui_log_item *cuip;
  127. ASSERT(nextents > 0);
  128. if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
  129. cuip = kzalloc(xfs_cui_log_item_sizeof(nextents),
  130. GFP_KERNEL | __GFP_NOFAIL);
  131. else
  132. cuip = kmem_cache_zalloc(xfs_cui_cache,
  133. GFP_KERNEL | __GFP_NOFAIL);
  134. xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
  135. cuip->cui_format.cui_nextents = nextents;
  136. cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
  137. atomic_set(&cuip->cui_next_extent, 0);
  138. atomic_set(&cuip->cui_refcount, 2);
  139. return cuip;
  140. }
  141. static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
  142. {
  143. return container_of(lip, struct xfs_cud_log_item, cud_item);
  144. }
  145. STATIC void
  146. xfs_cud_item_size(
  147. struct xfs_log_item *lip,
  148. int *nvecs,
  149. int *nbytes)
  150. {
  151. *nvecs += 1;
  152. *nbytes += sizeof(struct xfs_cud_log_format);
  153. }
  154. /*
  155. * This is called to fill in the vector of log iovecs for the
  156. * given cud log item. We use only 1 iovec, and we point that
  157. * at the cud_log_format structure embedded in the cud item.
  158. * It is at this point that we assert that all of the extent
  159. * slots in the cud item have been filled.
  160. */
  161. STATIC void
  162. xfs_cud_item_format(
  163. struct xfs_log_item *lip,
  164. struct xfs_log_vec *lv)
  165. {
  166. struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
  167. struct xfs_log_iovec *vecp = NULL;
  168. cudp->cud_format.cud_type = XFS_LI_CUD;
  169. cudp->cud_format.cud_size = 1;
  170. xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
  171. sizeof(struct xfs_cud_log_format));
  172. }
  173. /*
  174. * The CUD is either committed or aborted if the transaction is cancelled. If
  175. * the transaction is cancelled, drop our reference to the CUI and free the
  176. * CUD.
  177. */
  178. STATIC void
  179. xfs_cud_item_release(
  180. struct xfs_log_item *lip)
  181. {
  182. struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
  183. xfs_cui_release(cudp->cud_cuip);
  184. kvfree(cudp->cud_item.li_lv_shadow);
  185. kmem_cache_free(xfs_cud_cache, cudp);
  186. }
  187. static struct xfs_log_item *
  188. xfs_cud_item_intent(
  189. struct xfs_log_item *lip)
  190. {
  191. return &CUD_ITEM(lip)->cud_cuip->cui_item;
  192. }
  193. static const struct xfs_item_ops xfs_cud_item_ops = {
  194. .flags = XFS_ITEM_RELEASE_WHEN_COMMITTED |
  195. XFS_ITEM_INTENT_DONE,
  196. .iop_size = xfs_cud_item_size,
  197. .iop_format = xfs_cud_item_format,
  198. .iop_release = xfs_cud_item_release,
  199. .iop_intent = xfs_cud_item_intent,
  200. };
  201. static inline struct xfs_refcount_intent *ci_entry(const struct list_head *e)
  202. {
  203. return list_entry(e, struct xfs_refcount_intent, ri_list);
  204. }
  205. /* Sort refcount intents by AG. */
  206. static int
  207. xfs_refcount_update_diff_items(
  208. void *priv,
  209. const struct list_head *a,
  210. const struct list_head *b)
  211. {
  212. struct xfs_refcount_intent *ra = ci_entry(a);
  213. struct xfs_refcount_intent *rb = ci_entry(b);
  214. return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
  215. }
  216. /* Log refcount updates in the intent item. */
  217. STATIC void
  218. xfs_refcount_update_log_item(
  219. struct xfs_trans *tp,
  220. struct xfs_cui_log_item *cuip,
  221. struct xfs_refcount_intent *ri)
  222. {
  223. uint next_extent;
  224. struct xfs_phys_extent *pmap;
  225. /*
  226. * atomic_inc_return gives us the value after the increment;
  227. * we want to use it as an array index so we need to subtract 1 from
  228. * it.
  229. */
  230. next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
  231. ASSERT(next_extent < cuip->cui_format.cui_nextents);
  232. pmap = &cuip->cui_format.cui_extents[next_extent];
  233. pmap->pe_startblock = ri->ri_startblock;
  234. pmap->pe_len = ri->ri_blockcount;
  235. pmap->pe_flags = 0;
  236. switch (ri->ri_type) {
  237. case XFS_REFCOUNT_INCREASE:
  238. case XFS_REFCOUNT_DECREASE:
  239. case XFS_REFCOUNT_ALLOC_COW:
  240. case XFS_REFCOUNT_FREE_COW:
  241. pmap->pe_flags |= ri->ri_type;
  242. break;
  243. default:
  244. ASSERT(0);
  245. }
  246. }
  247. static struct xfs_log_item *
  248. xfs_refcount_update_create_intent(
  249. struct xfs_trans *tp,
  250. struct list_head *items,
  251. unsigned int count,
  252. bool sort)
  253. {
  254. struct xfs_mount *mp = tp->t_mountp;
  255. struct xfs_cui_log_item *cuip = xfs_cui_init(mp, count);
  256. struct xfs_refcount_intent *ri;
  257. ASSERT(count > 0);
  258. if (sort)
  259. list_sort(mp, items, xfs_refcount_update_diff_items);
  260. list_for_each_entry(ri, items, ri_list)
  261. xfs_refcount_update_log_item(tp, cuip, ri);
  262. return &cuip->cui_item;
  263. }
  264. /* Get an CUD so we can process all the deferred refcount updates. */
  265. static struct xfs_log_item *
  266. xfs_refcount_update_create_done(
  267. struct xfs_trans *tp,
  268. struct xfs_log_item *intent,
  269. unsigned int count)
  270. {
  271. struct xfs_cui_log_item *cuip = CUI_ITEM(intent);
  272. struct xfs_cud_log_item *cudp;
  273. cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
  274. xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
  275. &xfs_cud_item_ops);
  276. cudp->cud_cuip = cuip;
  277. cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;
  278. return &cudp->cud_item;
  279. }
  280. /* Add this deferred CUI to the transaction. */
  281. void
  282. xfs_refcount_defer_add(
  283. struct xfs_trans *tp,
  284. struct xfs_refcount_intent *ri)
  285. {
  286. struct xfs_mount *mp = tp->t_mountp;
  287. trace_xfs_refcount_defer(mp, ri);
  288. ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_startblock);
  289. xfs_defer_add(tp, &ri->ri_list, &xfs_refcount_update_defer_type);
  290. }
  291. /* Cancel a deferred refcount update. */
  292. STATIC void
  293. xfs_refcount_update_cancel_item(
  294. struct list_head *item)
  295. {
  296. struct xfs_refcount_intent *ri = ci_entry(item);
  297. xfs_perag_intent_put(ri->ri_pag);
  298. kmem_cache_free(xfs_refcount_intent_cache, ri);
  299. }
  300. /* Process a deferred refcount update. */
  301. STATIC int
  302. xfs_refcount_update_finish_item(
  303. struct xfs_trans *tp,
  304. struct xfs_log_item *done,
  305. struct list_head *item,
  306. struct xfs_btree_cur **state)
  307. {
  308. struct xfs_refcount_intent *ri = ci_entry(item);
  309. int error;
  310. /* Did we run out of reservation? Requeue what we didn't finish. */
  311. error = xfs_refcount_finish_one(tp, ri, state);
  312. if (!error && ri->ri_blockcount > 0) {
  313. ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
  314. ri->ri_type == XFS_REFCOUNT_DECREASE);
  315. return -EAGAIN;
  316. }
  317. xfs_refcount_update_cancel_item(item);
  318. return error;
  319. }
  320. /* Clean up after calling xfs_refcount_finish_one. */
  321. STATIC void
  322. xfs_refcount_finish_one_cleanup(
  323. struct xfs_trans *tp,
  324. struct xfs_btree_cur *rcur,
  325. int error)
  326. {
  327. struct xfs_buf *agbp;
  328. if (rcur == NULL)
  329. return;
  330. agbp = rcur->bc_ag.agbp;
  331. xfs_btree_del_cursor(rcur, error);
  332. if (error)
  333. xfs_trans_brelse(tp, agbp);
  334. }
  335. /* Abort all pending CUIs. */
  336. STATIC void
  337. xfs_refcount_update_abort_intent(
  338. struct xfs_log_item *intent)
  339. {
  340. xfs_cui_release(CUI_ITEM(intent));
  341. }
  342. /* Is this recovered CUI ok? */
  343. static inline bool
  344. xfs_cui_validate_phys(
  345. struct xfs_mount *mp,
  346. struct xfs_phys_extent *pmap)
  347. {
  348. if (!xfs_has_reflink(mp))
  349. return false;
  350. if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
  351. return false;
  352. switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
  353. case XFS_REFCOUNT_INCREASE:
  354. case XFS_REFCOUNT_DECREASE:
  355. case XFS_REFCOUNT_ALLOC_COW:
  356. case XFS_REFCOUNT_FREE_COW:
  357. break;
  358. default:
  359. return false;
  360. }
  361. return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
  362. }
  363. static inline void
  364. xfs_cui_recover_work(
  365. struct xfs_mount *mp,
  366. struct xfs_defer_pending *dfp,
  367. struct xfs_phys_extent *pmap)
  368. {
  369. struct xfs_refcount_intent *ri;
  370. ri = kmem_cache_alloc(xfs_refcount_intent_cache,
  371. GFP_KERNEL | __GFP_NOFAIL);
  372. ri->ri_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
  373. ri->ri_startblock = pmap->pe_startblock;
  374. ri->ri_blockcount = pmap->pe_len;
  375. ri->ri_pag = xfs_perag_intent_get(mp, pmap->pe_startblock);
  376. xfs_defer_add_item(dfp, &ri->ri_list);
  377. }
  378. /*
  379. * Process a refcount update intent item that was recovered from the log.
  380. * We need to update the refcountbt.
  381. */
  382. STATIC int
  383. xfs_refcount_recover_work(
  384. struct xfs_defer_pending *dfp,
  385. struct list_head *capture_list)
  386. {
  387. struct xfs_trans_res resv;
  388. struct xfs_log_item *lip = dfp->dfp_intent;
  389. struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
  390. struct xfs_trans *tp;
  391. struct xfs_mount *mp = lip->li_log->l_mp;
  392. int i;
  393. int error = 0;
  394. /*
  395. * First check the validity of the extents described by the
  396. * CUI. If any are bad, then assume that all are bad and
  397. * just toss the CUI.
  398. */
  399. for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
  400. if (!xfs_cui_validate_phys(mp,
  401. &cuip->cui_format.cui_extents[i])) {
  402. XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
  403. &cuip->cui_format,
  404. sizeof(cuip->cui_format));
  405. return -EFSCORRUPTED;
  406. }
  407. xfs_cui_recover_work(mp, dfp, &cuip->cui_format.cui_extents[i]);
  408. }
  409. /*
  410. * Under normal operation, refcount updates are deferred, so we
  411. * wouldn't be adding them directly to a transaction. All
  412. * refcount updates manage reservation usage internally and
  413. * dynamically by deferring work that won't fit in the
  414. * transaction. Normally, any work that needs to be deferred
  415. * gets attached to the same defer_ops that scheduled the
  416. * refcount update. However, we're in log recovery here, so we
  417. * use the passed in defer_ops and to finish up any work that
  418. * doesn't fit. We need to reserve enough blocks to handle a
  419. * full btree split on either end of the refcount range.
  420. */
  421. resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
  422. error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
  423. XFS_TRANS_RESERVE, &tp);
  424. if (error)
  425. return error;
  426. error = xlog_recover_finish_intent(tp, dfp);
  427. if (error == -EFSCORRUPTED)
  428. XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
  429. &cuip->cui_format,
  430. sizeof(cuip->cui_format));
  431. if (error)
  432. goto abort_error;
  433. return xfs_defer_ops_capture_and_commit(tp, capture_list);
  434. abort_error:
  435. xfs_trans_cancel(tp);
  436. return error;
  437. }
  438. /* Relog an intent item to push the log tail forward. */
  439. static struct xfs_log_item *
  440. xfs_refcount_relog_intent(
  441. struct xfs_trans *tp,
  442. struct xfs_log_item *intent,
  443. struct xfs_log_item *done_item)
  444. {
  445. struct xfs_cui_log_item *cuip;
  446. struct xfs_phys_extent *pmap;
  447. unsigned int count;
  448. count = CUI_ITEM(intent)->cui_format.cui_nextents;
  449. pmap = CUI_ITEM(intent)->cui_format.cui_extents;
  450. cuip = xfs_cui_init(tp->t_mountp, count);
  451. memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
  452. atomic_set(&cuip->cui_next_extent, count);
  453. return &cuip->cui_item;
  454. }
  455. const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
  456. .name = "refcount",
  457. .max_items = XFS_CUI_MAX_FAST_EXTENTS,
  458. .create_intent = xfs_refcount_update_create_intent,
  459. .abort_intent = xfs_refcount_update_abort_intent,
  460. .create_done = xfs_refcount_update_create_done,
  461. .finish_item = xfs_refcount_update_finish_item,
  462. .finish_cleanup = xfs_refcount_finish_one_cleanup,
  463. .cancel_item = xfs_refcount_update_cancel_item,
  464. .recover_work = xfs_refcount_recover_work,
  465. .relog_intent = xfs_refcount_relog_intent,
  466. };
  467. STATIC bool
  468. xfs_cui_item_match(
  469. struct xfs_log_item *lip,
  470. uint64_t intent_id)
  471. {
  472. return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
  473. }
  474. static const struct xfs_item_ops xfs_cui_item_ops = {
  475. .flags = XFS_ITEM_INTENT,
  476. .iop_size = xfs_cui_item_size,
  477. .iop_format = xfs_cui_item_format,
  478. .iop_unpin = xfs_cui_item_unpin,
  479. .iop_release = xfs_cui_item_release,
  480. .iop_match = xfs_cui_item_match,
  481. };
  482. static inline void
  483. xfs_cui_copy_format(
  484. struct xfs_cui_log_format *dst,
  485. const struct xfs_cui_log_format *src)
  486. {
  487. unsigned int i;
  488. memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));
  489. for (i = 0; i < src->cui_nextents; i++)
  490. memcpy(&dst->cui_extents[i], &src->cui_extents[i],
  491. sizeof(struct xfs_phys_extent));
  492. }
  493. /*
  494. * This routine is called to create an in-core extent refcount update
  495. * item from the cui format structure which was logged on disk.
  496. * It allocates an in-core cui, copies the extents from the format
  497. * structure into it, and adds the cui to the AIL with the given
  498. * LSN.
  499. */
  500. STATIC int
  501. xlog_recover_cui_commit_pass2(
  502. struct xlog *log,
  503. struct list_head *buffer_list,
  504. struct xlog_recover_item *item,
  505. xfs_lsn_t lsn)
  506. {
  507. struct xfs_mount *mp = log->l_mp;
  508. struct xfs_cui_log_item *cuip;
  509. struct xfs_cui_log_format *cui_formatp;
  510. size_t len;
  511. cui_formatp = item->ri_buf[0].i_addr;
  512. if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
  513. XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
  514. item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
  515. return -EFSCORRUPTED;
  516. }
  517. len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
  518. if (item->ri_buf[0].i_len != len) {
  519. XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
  520. item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
  521. return -EFSCORRUPTED;
  522. }
  523. cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
  524. xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
  525. atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
  526. xlog_recover_intent_item(log, &cuip->cui_item, lsn,
  527. &xfs_refcount_update_defer_type);
  528. return 0;
  529. }
  530. const struct xlog_recover_item_ops xlog_cui_item_ops = {
  531. .item_type = XFS_LI_CUI,
  532. .commit_pass2 = xlog_recover_cui_commit_pass2,
  533. };
  534. /*
  535. * This routine is called when an CUD format structure is found in a committed
  536. * transaction in the log. Its purpose is to cancel the corresponding CUI if it
  537. * was still in the log. To do this it searches the AIL for the CUI with an id
  538. * equal to that in the CUD format structure. If we find it we drop the CUD
  539. * reference, which removes the CUI from the AIL and frees it.
  540. */
  541. STATIC int
  542. xlog_recover_cud_commit_pass2(
  543. struct xlog *log,
  544. struct list_head *buffer_list,
  545. struct xlog_recover_item *item,
  546. xfs_lsn_t lsn)
  547. {
  548. struct xfs_cud_log_format *cud_formatp;
  549. cud_formatp = item->ri_buf[0].i_addr;
  550. if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
  551. XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
  552. item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
  553. return -EFSCORRUPTED;
  554. }
  555. xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
  556. return 0;
  557. }
  558. const struct xlog_recover_item_ops xlog_cud_item_ops = {
  559. .item_type = XFS_LI_CUD,
  560. .commit_pass2 = xlog_recover_cud_commit_pass2,
  561. };