xfs_exchmaps_item.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (c) 2020-2024 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <djwong@kernel.org>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_format.h"
  9. #include "xfs_log_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_bit.h"
  12. #include "xfs_shared.h"
  13. #include "xfs_mount.h"
  14. #include "xfs_defer.h"
  15. #include "xfs_inode.h"
  16. #include "xfs_trans.h"
  17. #include "xfs_trans_priv.h"
  18. #include "xfs_exchmaps_item.h"
  19. #include "xfs_exchmaps.h"
  20. #include "xfs_log.h"
  21. #include "xfs_bmap.h"
  22. #include "xfs_icache.h"
  23. #include "xfs_bmap_btree.h"
  24. #include "xfs_trans_space.h"
  25. #include "xfs_error.h"
  26. #include "xfs_log_priv.h"
  27. #include "xfs_log_recover.h"
  28. #include "xfs_exchrange.h"
  29. #include "xfs_trace.h"
/* Slab caches for the XMI (intent) and XMD (done) log items. */
struct kmem_cache	*xfs_xmi_cache;
struct kmem_cache	*xfs_xmd_cache;

/* Forward declaration: the ops table is defined at the bottom of this file. */
static const struct xfs_item_ops xfs_xmi_item_ops;
/* Convert a generic log item back to its containing XMI log item. */
static inline struct xfs_xmi_log_item *XMI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_xmi_log_item, xmi_item);
}
/* Free an XMI log item along with the shadow log vector attached to it. */
STATIC void
xfs_xmi_item_free(
	struct xfs_xmi_log_item	*xmi_lip)
{
	kvfree(xmi_lip->xmi_item.li_lv_shadow);
	kmem_cache_free(xfs_xmi_cache, xmi_lip);
}
/*
 * Freeing the XMI requires that we remove it from the AIL if it has already
 * been placed there. However, the XMI may not yet have been placed in the AIL
 * when called by xfs_xmi_release() from XMD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the XMI.
 */
STATIC void
xfs_xmi_release(
	struct xfs_xmi_log_item	*xmi_lip)
{
	ASSERT(atomic_read(&xmi_lip->xmi_refcount) > 0);
	if (atomic_dec_and_test(&xmi_lip->xmi_refcount)) {
		/* Last reference: remove from the AIL (if present) and free. */
		xfs_trans_ail_delete(&xmi_lip->xmi_item, 0);
		xfs_xmi_item_free(xmi_lip);
	}
}
  61. STATIC void
  62. xfs_xmi_item_size(
  63. struct xfs_log_item *lip,
  64. int *nvecs,
  65. int *nbytes)
  66. {
  67. *nvecs += 1;
  68. *nbytes += sizeof(struct xfs_xmi_log_format);
  69. }
/*
 * This is called to fill in the vector of log iovecs for the given xmi log
 * item. We use only 1 iovec, and we point that at the xmi_log_format structure
 * embedded in the xmi item.
 */
STATIC void
xfs_xmi_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_xmi_log_item	*xmi_lip = XMI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Stamp the item type and size into the ondisk header at format time. */
	xmi_lip->xmi_format.xmi_type = XFS_LI_XMI;
	xmi_lip->xmi_format.xmi_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_XMI_FORMAT,
			&xmi_lip->xmi_format,
			sizeof(struct xfs_xmi_log_format));
}
  88. /*
  89. * The unpin operation is the last place an XMI is manipulated in the log. It
  90. * is either inserted in the AIL or aborted in the event of a log I/O error. In
  91. * either case, the XMI transaction has been successfully committed to make it
  92. * this far. Therefore, we expect whoever committed the XMI to either construct
  93. * and commit the XMD or drop the XMD's reference in the event of error. Simply
  94. * drop the log's XMI reference now that the log is done with it.
  95. */
  96. STATIC void
  97. xfs_xmi_item_unpin(
  98. struct xfs_log_item *lip,
  99. int remove)
  100. {
  101. struct xfs_xmi_log_item *xmi_lip = XMI_ITEM(lip);
  102. xfs_xmi_release(xmi_lip);
  103. }
/*
 * The XMI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an XMD isn't going to be
 * constructed and thus we free the XMI here directly.
 */
STATIC void
xfs_xmi_item_release(
	struct xfs_log_item	*lip)
{
	xfs_xmi_release(XMI_ITEM(lip));
}
/* Allocate and initialize an xmi item. */
STATIC struct xfs_xmi_log_item *
xfs_xmi_init(
	struct xfs_mount	*mp)
{
	struct xfs_xmi_log_item	*xmi_lip;

	/* __GFP_NOFAIL: allocation cannot fail, so no error path is needed. */
	xmi_lip = kmem_cache_zalloc(xfs_xmi_cache, GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &xmi_lip->xmi_item, XFS_LI_XMI, &xfs_xmi_item_ops);
	/* The item's kernel address doubles as its unique intent id. */
	xmi_lip->xmi_format.xmi_id = (uintptr_t)(void *)xmi_lip;
	/* Two refs: one for the creating transaction, one for log unpin. */
	atomic_set(&xmi_lip->xmi_refcount, 2);
	return xmi_lip;
}
/* Convert a generic log item back to its containing XMD log item. */
static inline struct xfs_xmd_log_item *XMD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_xmd_log_item, xmd_item);
}
  131. STATIC void
  132. xfs_xmd_item_size(
  133. struct xfs_log_item *lip,
  134. int *nvecs,
  135. int *nbytes)
  136. {
  137. *nvecs += 1;
  138. *nbytes += sizeof(struct xfs_xmd_log_format);
  139. }
/*
 * This is called to fill in the vector of log iovecs for the given xmd log
 * item. We use only 1 iovec, and we point that at the xmd_log_format structure
 * embedded in the xmd item.
 */
STATIC void
xfs_xmd_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_xmd_log_item	*xmd_lip = XMD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Stamp the item type and size into the ondisk header at format time. */
	xmd_lip->xmd_format.xmd_type = XFS_LI_XMD;
	xmd_lip->xmd_format.xmd_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_XMD_FORMAT, &xmd_lip->xmd_format,
			sizeof(struct xfs_xmd_log_format));
}
/*
 * The XMD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the XMI and free the
 * XMD.
 */
STATIC void
xfs_xmd_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_xmd_log_item	*xmd_lip = XMD_ITEM(lip);

	/* Drop the XMD's reference to the intent item it completes. */
	xfs_xmi_release(xmd_lip->xmd_intent_log_item);
	/* Free the shadow log vector buffer and the XMD itself. */
	kvfree(xmd_lip->xmd_item.li_lv_shadow);
	kmem_cache_free(xfs_xmd_cache, xmd_lip);
}
/* Return the log intent item (the XMI) that this XMD item completes. */
static struct xfs_log_item *
xfs_xmd_item_intent(
	struct xfs_log_item	*lip)
{
	return &XMD_ITEM(lip)->xmd_intent_log_item->xmi_item;
}
/* Log item operations for the mapping exchange done (XMD) item. */
static const struct xfs_item_ops xfs_xmd_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_xmd_item_size,
	.iop_format	= xfs_xmd_item_format,
	.iop_release	= xfs_xmd_item_release,
	.iop_intent	= xfs_xmd_item_intent,
};
/* Log file mapping exchange information in the intent item. */
STATIC struct xfs_log_item *
xfs_exchmaps_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_xmi_log_item		*xmi_lip;
	struct xfs_exchmaps_intent	*xmi;
	struct xfs_xmi_log_format	*xlf;

	/* Exchmaps intents are never batched; exactly one work item per XMI. */
	ASSERT(count == 1);

	xmi = list_first_entry_or_null(items, struct xfs_exchmaps_intent,
			xmi_list);

	xmi_lip = xfs_xmi_init(tp->t_mountp);
	xlf = &xmi_lip->xmi_format;

	/* Copy the incore intent state into the ondisk log format. */
	xlf->xmi_inode1 = xmi->xmi_ip1->i_ino;
	xlf->xmi_igen1 = VFS_I(xmi->xmi_ip1)->i_generation;
	xlf->xmi_inode2 = xmi->xmi_ip2->i_ino;
	xlf->xmi_igen2 = VFS_I(xmi->xmi_ip2)->i_generation;
	xlf->xmi_startoff1 = xmi->xmi_startoff1;
	xlf->xmi_startoff2 = xmi->xmi_startoff2;
	xlf->xmi_blockcount = xmi->xmi_blockcount;
	xlf->xmi_isize1 = xmi->xmi_isize1;
	xlf->xmi_isize2 = xmi->xmi_isize2;
	/* Persist only the flag bits that log recovery understands. */
	xlf->xmi_flags = xmi->xmi_flags & XFS_EXCHMAPS_LOGGED_FLAGS;

	return &xmi_lip->xmi_item;
}
/* Create the done (XMD) item to mark the progress of an XMI. */
STATIC struct xfs_log_item *
xfs_exchmaps_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_xmi_log_item		*xmi_lip = XMI_ITEM(intent);
	struct xfs_xmd_log_item		*xmd_lip;

	/* __GFP_NOFAIL: allocation cannot fail, so no error path is needed. */
	xmd_lip = kmem_cache_zalloc(xfs_xmd_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &xmd_lip->xmd_item, XFS_LI_XMD,
			  &xfs_xmd_item_ops);
	xmd_lip->xmd_intent_log_item = xmi_lip;
	/* Tie the done item to its intent via the intent's unique id. */
	xmd_lip->xmd_format.xmd_xmi_id = xmi_lip->xmi_format.xmi_id;

	return &xmd_lip->xmd_item;
}
/* Add this deferred XMI to the transaction. */
void
xfs_exchmaps_defer_add(
	struct xfs_trans		*tp,
	struct xfs_exchmaps_intent	*xmi)
{
	trace_xfs_exchmaps_defer(tp->t_mountp, xmi);

	xfs_defer_add(tp, &xmi->xmi_list, &xfs_exchmaps_defer_type);
}
/* Convert a deferred-work list entry back to its incore exchmaps intent. */
static inline struct xfs_exchmaps_intent *xmi_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_exchmaps_intent, xmi_list);
}
/* Cancel a deferred file mapping exchange. */
STATIC void
xfs_exchmaps_cancel_item(
	struct list_head	*item)
{
	struct xfs_exchmaps_intent	*xmi = xmi_entry(item);

	kmem_cache_free(xfs_exchmaps_intent_cache, xmi);
}
/* Process a deferred file mapping exchange. */
STATIC int
xfs_exchmaps_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_exchmaps_intent	*xmi = xmi_entry(item);
	int				error;

	/*
	 * Exchange one more mappings between two files. If there's still more
	 * work to do, we want to requeue ourselves after all other pending
	 * deferred operations have finished. This includes all of the dfops
	 * that we queued directly as well as any new ones created in the
	 * process of finishing the others. Doing so prevents us from queuing
	 * a large number of XMI log items in kernel memory, which in turn
	 * prevents us from pinning the tail of the log (while logging those
	 * new XMI items) until the first XMI items can be processed.
	 */
	error = xfs_exchmaps_finish_one(tp, xmi);
	/* -EAGAIN means requeue; anything else means we're done with it. */
	if (error != -EAGAIN)
		xfs_exchmaps_cancel_item(item);
	return error;
}
/* Abort all pending XMIs. */
STATIC void
xfs_exchmaps_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_xmi_release(XMI_ITEM(intent));
}
  281. /* Is this recovered XMI ok? */
  282. static inline bool
  283. xfs_xmi_validate(
  284. struct xfs_mount *mp,
  285. struct xfs_xmi_log_item *xmi_lip)
  286. {
  287. struct xfs_xmi_log_format *xlf = &xmi_lip->xmi_format;
  288. if (!xfs_has_exchange_range(mp))
  289. return false;
  290. if (xmi_lip->xmi_format.__pad != 0)
  291. return false;
  292. if (xlf->xmi_flags & ~XFS_EXCHMAPS_LOGGED_FLAGS)
  293. return false;
  294. if (!xfs_verify_ino(mp, xlf->xmi_inode1) ||
  295. !xfs_verify_ino(mp, xlf->xmi_inode2))
  296. return false;
  297. if (!xfs_verify_fileext(mp, xlf->xmi_startoff1, xlf->xmi_blockcount))
  298. return false;
  299. return xfs_verify_fileext(mp, xlf->xmi_startoff2, xlf->xmi_blockcount);
  300. }
/*
 * Use the recovered log state to create a new request, estimate resource
 * requirements, and create a new incore intent state.
 *
 * On success, fills in @req, stores the grabbed inodes in @ipp1/@ipp2, and
 * returns the new incore intent already attached to @dfp.  On failure,
 * returns an ERR_PTR and releases any inode references taken here.
 */
STATIC struct xfs_exchmaps_intent *
xfs_xmi_item_recover_intent(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	const struct xfs_xmi_log_format	*xlf,
	struct xfs_exchmaps_req		*req,
	struct xfs_inode		**ipp1,
	struct xfs_inode		**ipp2)
{
	struct xfs_inode		*ip1, *ip2;
	struct xfs_exchmaps_intent	*xmi;
	int				error;

	/*
	 * Grab both inodes and set IRECOVERY to prevent trimming of post-eof
	 * mappings and freeing of unlinked inodes until we're totally done
	 * processing files. The ondisk format of this new log item contains
	 * file handle information, which is why recovery for other items do
	 * not check the inode generation number.
	 */
	error = xlog_recover_iget_handle(mp, xlf->xmi_inode1, xlf->xmi_igen1,
			&ip1);
	if (error) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, xlf,
				sizeof(*xlf));
		return ERR_PTR(error);
	}

	error = xlog_recover_iget_handle(mp, xlf->xmi_inode2, xlf->xmi_igen2,
			&ip2);
	if (error) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, xlf,
				sizeof(*xlf));
		goto err_rele1;
	}

	/* Rebuild the exchange request from the recovered log format. */
	req->ip1 = ip1;
	req->ip2 = ip2;
	req->startoff1 = xlf->xmi_startoff1;
	req->startoff2 = xlf->xmi_startoff2;
	req->blockcount = xlf->xmi_blockcount;
	req->flags = xlf->xmi_flags & XFS_EXCHMAPS_PARAMS;

	/* Lock both inodes while estimating resource requirements. */
	xfs_exchrange_ilock(NULL, ip1, ip2);
	error = xfs_exchmaps_estimate(req);
	xfs_exchrange_iunlock(ip1, ip2);
	if (error)
		goto err_rele2;

	*ipp1 = ip1;
	*ipp2 = ip2;
	xmi = xfs_exchmaps_init_intent(req);
	xfs_defer_add_item(dfp, &xmi->xmi_list);
	return xmi;

err_rele2:
	xfs_irele(ip2);
err_rele1:
	xfs_irele(ip1);
	req->ip2 = req->ip1 = NULL;
	return ERR_PTR(error);
}
/* Process a file mapping exchange item that was recovered from the log. */
STATIC int
xfs_exchmaps_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_exchmaps_req		req = { .flags = 0 };
	struct xfs_trans_res		resv;
	struct xfs_exchmaps_intent	*xmi;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_xmi_log_item		*xmi_lip = XMI_ITEM(lip);
	struct xfs_mount		*mp = lip->li_log->l_mp;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip1, *ip2;
	int				error = 0;

	/* Reject corrupt or impossible recovered intent contents. */
	if (!xfs_xmi_validate(mp, xmi_lip)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&xmi_lip->xmi_format,
				sizeof(xmi_lip->xmi_format));
		return -EFSCORRUPTED;
	}

	/* Rebuild the incore state; this also grabs both inodes. */
	xmi = xfs_xmi_item_recover_intent(mp, dfp, &xmi_lip->xmi_format, &req,
			&ip1, &ip2);
	if (IS_ERR(xmi))
		return PTR_ERR(xmi);

	trace_xfs_exchmaps_recover(mp, xmi);

	resv = xlog_recover_resv(&M_RES(mp)->tr_write);
	error = xfs_trans_alloc(mp, &resv, req.resblks, 0, 0, &tp);
	if (error)
		goto err_rele;

	xfs_exchrange_ilock(tp, ip1, ip2);

	/* Prepare both inodes before replaying the exchange itself. */
	xfs_exchmaps_ensure_reflink(tp, xmi);
	xfs_exchmaps_upgrade_extent_counts(tp, xmi);
	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&xmi_lip->xmi_format,
				sizeof(xmi_lip->xmi_format));
	if (error)
		goto err_cancel;

	/*
	 * Commit transaction, which frees the transaction and saves the inodes
	 * for later replay activities.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
	goto err_unlock;

err_cancel:
	xfs_trans_cancel(tp);
err_unlock:
	xfs_exchrange_iunlock(ip1, ip2);
err_rele:
	xfs_irele(ip2);
	xfs_irele(ip1);
	return error;
}
  416. /* Relog an intent item to push the log tail forward. */
  417. static struct xfs_log_item *
  418. xfs_exchmaps_relog_intent(
  419. struct xfs_trans *tp,
  420. struct xfs_log_item *intent,
  421. struct xfs_log_item *done_item)
  422. {
  423. struct xfs_xmi_log_item *xmi_lip;
  424. struct xfs_xmi_log_format *old_xlf, *new_xlf;
  425. old_xlf = &XMI_ITEM(intent)->xmi_format;
  426. xmi_lip = xfs_xmi_init(tp->t_mountp);
  427. new_xlf = &xmi_lip->xmi_format;
  428. new_xlf->xmi_inode1 = old_xlf->xmi_inode1;
  429. new_xlf->xmi_inode2 = old_xlf->xmi_inode2;
  430. new_xlf->xmi_igen1 = old_xlf->xmi_igen1;
  431. new_xlf->xmi_igen2 = old_xlf->xmi_igen2;
  432. new_xlf->xmi_startoff1 = old_xlf->xmi_startoff1;
  433. new_xlf->xmi_startoff2 = old_xlf->xmi_startoff2;
  434. new_xlf->xmi_blockcount = old_xlf->xmi_blockcount;
  435. new_xlf->xmi_flags = old_xlf->xmi_flags;
  436. new_xlf->xmi_isize1 = old_xlf->xmi_isize1;
  437. new_xlf->xmi_isize2 = old_xlf->xmi_isize2;
  438. return &xmi_lip->xmi_item;
  439. }
/* Deferred-operation hooks for replaying file mapping exchanges. */
const struct xfs_defer_op_type xfs_exchmaps_defer_type = {
	.name		= "exchmaps",
	.max_items	= 1,
	.create_intent	= xfs_exchmaps_create_intent,
	.abort_intent	= xfs_exchmaps_abort_intent,
	.create_done	= xfs_exchmaps_create_done,
	.finish_item	= xfs_exchmaps_finish_item,
	.cancel_item	= xfs_exchmaps_cancel_item,
	.recover_work	= xfs_exchmaps_recover_work,
	.relog_intent	= xfs_exchmaps_relog_intent,
};
/* Does this XMI's id match the intent id that recovery is looking for? */
STATIC bool
xfs_xmi_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return XMI_ITEM(lip)->xmi_format.xmi_id == intent_id;
}
/* Log item operations for the mapping exchange intent (XMI) item. */
static const struct xfs_item_ops xfs_xmi_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_xmi_item_size,
	.iop_format	= xfs_xmi_item_format,
	.iop_unpin	= xfs_xmi_item_unpin,
	.iop_release	= xfs_xmi_item_release,
	.iop_match	= xfs_xmi_item_match,
};
/*
 * This routine is called to create an in-core file mapping exchange item from
 * the xmi format structure which was logged on disk. It allocates an in-core
 * xmi, copies the exchange information from the format structure into it, and
 * adds the xmi to the AIL with the given LSN.
 */
STATIC int
xlog_recover_xmi_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_xmi_log_item		*xmi_lip;
	struct xfs_xmi_log_format	*xmi_formatp;
	size_t				len;

	/* The recovered region must be exactly one ondisk format struct. */
	len = sizeof(struct xfs_xmi_log_format);
	if (item->ri_buf[0].i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}

	xmi_formatp = item->ri_buf[0].i_addr;
	/* The padding must have been zeroed when the item was logged. */
	if (xmi_formatp->__pad != 0) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}

	xmi_lip = xfs_xmi_init(mp);
	memcpy(&xmi_lip->xmi_format, xmi_formatp, len);

	xlog_recover_intent_item(log, &xmi_lip->xmi_item, lsn,
			&xfs_exchmaps_defer_type);
	return 0;
}
/* Recovery hooks for XMI log items found during log replay. */
const struct xlog_recover_item_ops xlog_xmi_item_ops = {
	.item_type	= XFS_LI_XMI,
	.commit_pass2	= xlog_recover_xmi_commit_pass2,
};
  503. /*
  504. * This routine is called when an XMD format structure is found in a committed
  505. * transaction in the log. Its purpose is to cancel the corresponding XMI if it
  506. * was still in the log. To do this it searches the AIL for the XMI with an id
  507. * equal to that in the XMD format structure. If we find it we drop the XMD
  508. * reference, which removes the XMI from the AIL and frees it.
  509. */
  510. STATIC int
  511. xlog_recover_xmd_commit_pass2(
  512. struct xlog *log,
  513. struct list_head *buffer_list,
  514. struct xlog_recover_item *item,
  515. xfs_lsn_t lsn)
  516. {
  517. struct xfs_xmd_log_format *xmd_formatp;
  518. xmd_formatp = item->ri_buf[0].i_addr;
  519. if (item->ri_buf[0].i_len != sizeof(struct xfs_xmd_log_format)) {
  520. XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
  521. return -EFSCORRUPTED;
  522. }
  523. xlog_recover_release_intent(log, XFS_LI_XMI, xmd_formatp->xmd_xmi_id);
  524. return 0;
  525. }
/* Recovery hooks for XMD log items found during log replay. */
const struct xlog_recover_item_ops xlog_xmd_item_ops = {
	.item_type	= XFS_LI_XMD,
	.commit_pass2	= xlog_recover_xmd_commit_pass2,
};