xfs_bmap_btree.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_log_format.h"
  11. #include "xfs_trans_resv.h"
  12. #include "xfs_bit.h"
  13. #include "xfs_mount.h"
  14. #include "xfs_defer.h"
  15. #include "xfs_inode.h"
  16. #include "xfs_trans.h"
  17. #include "xfs_inode_item.h"
  18. #include "xfs_alloc.h"
  19. #include "xfs_btree.h"
  20. #include "xfs_bmap_btree.h"
  21. #include "xfs_bmap.h"
  22. #include "xfs_error.h"
  23. #include "xfs_quota.h"
  24. #include "xfs_trace.h"
  25. #include "xfs_cksum.h"
  26. #include "xfs_rmap.h"
/*
 * Convert on-disk form of btree root to in-memory form.
 *
 * Copies the level, record count, keys and pointers from the on-disk root
 * (@dblock, in the inode fork) into a freshly initialized in-memory root
 * block (@rblock).
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,	/* source: on-disk root */
	int			dblocklen,
	struct xfs_btree_block	*rblock,	/* dest: in-memory root */
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;	/* "from" key pointer */
	__be64			*fpp;	/* "from" block pointer */
	xfs_bmbt_key_t		*tkp;	/* "to" key pointer */
	__be64			*tpp;	/* "to" block pointer */

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	/* an on-disk bmbt root is never a leaf */
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;

	/*
	 * dmxr is first the source block's record capacity, which determines
	 * where the pointer array starts within the on-disk root...
	 */
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	/* ...and is then reused as the actual number of entries to copy. */
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
/*
 * Unpack an on-disk (big-endian, bit-packed) bmap extent record into the
 * uncompressed in-memory form.
 *
 * On-disk packing (most significant bit first; widths implied by the
 * masks/shifts below and the BMBT_*_BITLEN asserts in
 * xfs_bmbt_disk_set_all()):
 *   l0 bit  63:      extent state flag (set == unwritten)
 *   l0 bits 62..9:   startoff
 *   l0 bits  8..0:   high 9 bits of startblock
 *   l1 bits 63..21:  low 43 bits of startblock
 *   l1 bits 20..0:   blockcount
 */
void
xfs_bmbt_disk_get_all(
	struct xfs_bmbt_rec	*rec,
	struct xfs_bmbt_irec	*irec)
{
	/* unaligned loads: the record may sit at any offset in the block */
	uint64_t		l0 = get_unaligned_be64(&rec->l0);
	uint64_t		l1 = get_unaligned_be64(&rec->l1);

	irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}
  74. /*
  75. * Extract the blockcount field from an on disk bmap extent record.
  76. */
  77. xfs_filblks_t
  78. xfs_bmbt_disk_get_blockcount(
  79. xfs_bmbt_rec_t *r)
  80. {
  81. return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
  82. }
  83. /*
  84. * Extract the startoff field from a disk format bmap extent record.
  85. */
  86. xfs_fileoff_t
  87. xfs_bmbt_disk_get_startoff(
  88. xfs_bmbt_rec_t *r)
  89. {
  90. return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
  91. xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
  92. }
/*
 * Set all the fields in a bmap extent record from the uncompressed form,
 * packing into the on-disk big-endian layout (the inverse of
 * xfs_bmbt_disk_get_all()).
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	/* any non-NORM state is encoded as the single "unwritten" flag bit */
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	/* each field must fit its on-disk bit width */
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	/* l0: flag bit, startoff, and the top 9 bits of startblock */
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	/* l1: low 43 bits of startblock and the 21-bit blockcount */
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		 ((xfs_bmbt_rec_base_t)s->br_blockcount &
		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}
/*
 * Convert in-memory form of btree root to on-disk form.
 *
 * The inverse of xfs_bmdr_to_bmbt(): copies level, record count, keys and
 * pointers from the in-memory root (@rblock) into the on-disk root
 * (@dblock) stored in the inode fork. The asserts verify the in-memory
 * root is in the expected detached state (no siblings, null daddr).
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,	/* source: in-memory root */
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,	/* dest: on-disk root */
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;	/* "from" key pointer */
	__be64			*fpp;	/* "from" block pointer */
	xfs_bmbt_key_t		*tkp;	/* "to" key pointer */
	__be64			*tpp;	/* "to" block pointer */

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;

	/*
	 * dmxr is first the destination's record capacity (locates the
	 * pointer array in the on-disk root), then is reused as the actual
	 * number of entries to copy.
	 */
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
/* Duplicate a bmbt cursor for use by a btree operation. */
STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.b.ip, cur->bc_private.b.whichfork);

	/*
	 * Copy the flags value since init cursor doesn't get it.
	 * (NOTE(review): an earlier version of this comment also mentioned
	 * firstblock and dfops; those now live in the transaction and need
	 * no copying here.)
	 */
	new->bc_private.b.flags = cur->bc_private.b.flags;

	return new;
}
/*
 * Fold the state of a duplicated cursor (@src) back into the original
 * (@dst) after a btree operation: accumulate the count of blocks
 * allocated, propagate the transaction's firstblock, and reset the
 * source's allocation count.
 */
STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	/* firstblock must be set unless this is a realtime inode */
	ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));

	dst->bc_private.b.allocated += src->bc_private.b.allocated;
	dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;

	src->bc_private.b.allocated = 0;
}
/*
 * Allocate one new block for the bmap btree.
 *
 * On success sets @new to the new block's fsbno and @stat to 1; a
 * recoverable allocation failure returns 0 with @stat == 0. Also updates
 * the cursor's allocated count, the inode block count and quota
 * accounting.
 */
STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,	/* in: hint block for the allocation */
	union xfs_btree_ptr	*new,	/* out: allocated block */
	int			*stat)	/* out: 1 == allocated, 0 == failed */
{
	xfs_alloc_arg_t		args;	/* block allocation args */
	int			error;	/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_tp->t_firstblock;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
			cur->bc_private.b.whichfork);

	if (args.fsbno == NULLFSBLOCK) {
		/* first allocation in this transaction: use the caller hint */
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert. If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed. If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = args.tp->t_blk_res;
	} else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		/* keep btree blocks near the previously allocated block */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0) {
		/* no block reservation left to draw from */
		error = -ENOSPC;
		goto error0;
	}
	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split. Try again and if
		 * successful activate the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		/* still nothing: report failure without an error code */
		*stat = 0;
		return 0;
	}

	ASSERT(args.len == 1);
	cur->bc_tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	*stat = 1;
	return 0;

 error0:
	return error;
}
  252. STATIC int
  253. xfs_bmbt_free_block(
  254. struct xfs_btree_cur *cur,
  255. struct xfs_buf *bp)
  256. {
  257. struct xfs_mount *mp = cur->bc_mp;
  258. struct xfs_inode *ip = cur->bc_private.b.ip;
  259. struct xfs_trans *tp = cur->bc_tp;
  260. xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
  261. struct xfs_owner_info oinfo;
  262. xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
  263. xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
  264. ip->i_d.di_nblocks--;
  265. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  266. xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
  267. return 0;
  268. }
  269. STATIC int
  270. xfs_bmbt_get_minrecs(
  271. struct xfs_btree_cur *cur,
  272. int level)
  273. {
  274. if (level == cur->bc_nlevels - 1) {
  275. struct xfs_ifork *ifp;
  276. ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
  277. cur->bc_private.b.whichfork);
  278. return xfs_bmbt_maxrecs(cur->bc_mp,
  279. ifp->if_broot_bytes, level == 0) / 2;
  280. }
  281. return cur->bc_mp->m_bmap_dmnr[level != 0];
  282. }
  283. int
  284. xfs_bmbt_get_maxrecs(
  285. struct xfs_btree_cur *cur,
  286. int level)
  287. {
  288. if (level == cur->bc_nlevels - 1) {
  289. struct xfs_ifork *ifp;
  290. ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
  291. cur->bc_private.b.whichfork);
  292. return xfs_bmbt_maxrecs(cur->bc_mp,
  293. ifp->if_broot_bytes, level == 0);
  294. }
  295. return cur->bc_mp->m_bmap_dmxr[level != 0];
  296. }
  297. /*
  298. * Get the maximum records we could store in the on-disk format.
  299. *
  300. * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
  301. * for the root node this checks the available space in the dinode fork
  302. * so that we can resize the in-memory buffer to match it. After a
  303. * resize to the maximum size this function returns the same value
  304. * as xfs_bmbt_get_maxrecs for the root node, too.
  305. */
  306. STATIC int
  307. xfs_bmbt_get_dmaxrecs(
  308. struct xfs_btree_cur *cur,
  309. int level)
  310. {
  311. if (level != cur->bc_nlevels - 1)
  312. return cur->bc_mp->m_bmap_dmxr[level != 0];
  313. return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
  314. }
  315. STATIC void
  316. xfs_bmbt_init_key_from_rec(
  317. union xfs_btree_key *key,
  318. union xfs_btree_rec *rec)
  319. {
  320. key->bmbt.br_startoff =
  321. cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
  322. }
  323. STATIC void
  324. xfs_bmbt_init_high_key_from_rec(
  325. union xfs_btree_key *key,
  326. union xfs_btree_rec *rec)
  327. {
  328. key->bmbt.br_startoff = cpu_to_be64(
  329. xfs_bmbt_disk_get_startoff(&rec->bmbt) +
  330. xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
  331. }
/* Pack the cursor's in-memory extent record into on-disk form. */
STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}
/*
 * Initialize the root pointer. The bmbt root lives in the inode fork
 * rather than at a disk address, so the pointer is always zero.
 */
STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}
  346. STATIC int64_t
  347. xfs_bmbt_key_diff(
  348. struct xfs_btree_cur *cur,
  349. union xfs_btree_key *key)
  350. {
  351. return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
  352. cur->bc_rec.b.br_startoff;
  353. }
  354. STATIC int64_t
  355. xfs_bmbt_diff_two_keys(
  356. struct xfs_btree_cur *cur,
  357. union xfs_btree_key *k1,
  358. union xfs_btree_key *k2)
  359. {
  360. return (int64_t)be64_to_cpu(k1->bmbt.br_startoff) -
  361. be64_to_cpu(k2->bmbt.br_startoff);
  362. }
/*
 * Structural verifier for a bmbt block: check the magic number (and v5
 * header fields on CRC filesystems), then sanity-check the level and hand
 * off to the generic long-pointer block verifier. Returns the failing
 * address, or NULL if the block looks fine.
 */
static xfs_failaddr_t
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	switch (block->bb_magic) {
	case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
		/*
		 * XXX: need a better way of verifying the owner here. Right now
		 * just make sure there has been one set.
		 */
		fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_BMAP_MAGIC):
		break;
	default:
		return __this_address;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return __this_address;

	return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}
  398. static void
  399. xfs_bmbt_read_verify(
  400. struct xfs_buf *bp)
  401. {
  402. xfs_failaddr_t fa;
  403. if (!xfs_btree_lblock_verify_crc(bp))
  404. xfs_verifier_error(bp, -EFSBADCRC, __this_address);
  405. else {
  406. fa = xfs_bmbt_verify(bp);
  407. if (fa)
  408. xfs_verifier_error(bp, -EFSCORRUPTED, fa);
  409. }
  410. if (bp->b_error)
  411. trace_xfs_btree_corrupt(bp, _RET_IP_);
  412. }
  413. static void
  414. xfs_bmbt_write_verify(
  415. struct xfs_buf *bp)
  416. {
  417. xfs_failaddr_t fa;
  418. fa = xfs_bmbt_verify(bp);
  419. if (fa) {
  420. trace_xfs_btree_corrupt(bp, _RET_IP_);
  421. xfs_verifier_error(bp, -EFSCORRUPTED, fa);
  422. return;
  423. }
  424. xfs_btree_lblock_calc_crc(bp);
  425. }
/* Buffer verifier operations for bmbt blocks. */
const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
	.verify_struct = xfs_bmbt_verify,
};
  432. STATIC int
  433. xfs_bmbt_keys_inorder(
  434. struct xfs_btree_cur *cur,
  435. union xfs_btree_key *k1,
  436. union xfs_btree_key *k2)
  437. {
  438. return be64_to_cpu(k1->bmbt.br_startoff) <
  439. be64_to_cpu(k2->bmbt.br_startoff);
  440. }
  441. STATIC int
  442. xfs_bmbt_recs_inorder(
  443. struct xfs_btree_cur *cur,
  444. union xfs_btree_rec *r1,
  445. union xfs_btree_rec *r2)
  446. {
  447. return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
  448. xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
  449. xfs_bmbt_disk_get_startoff(&r2->bmbt);
  450. }
/* Operations vector plugged into the generic btree code for the bmbt. */
static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
};
/*
 * Allocate a new bmap btree cursor.
 *
 * Allocated with KM_NOFS, so this cannot fail (the allocation blocks
 * rather than returning NULL). COW fork extents are not tracked in a
 * bmbt, hence the assert.
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;

	ASSERT(whichfork != XFS_COW_FORK);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* tree height is the root level plus one */
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_btnum = XFS_BTNUM_BMAP;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	/* bmbt uses 64-bit pointers and keeps its root in the inode fork */
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_private.b.ip = ip;
	cur->bc_private.b.allocated = 0;
	cur->bc_private.b.flags = 0;
	cur->bc_private.b.whichfork = whichfork;

	return cur;
}
  502. /*
  503. * Calculate number of records in a bmap btree block.
  504. */
  505. int
  506. xfs_bmbt_maxrecs(
  507. struct xfs_mount *mp,
  508. int blocklen,
  509. int leaf)
  510. {
  511. blocklen -= XFS_BMBT_BLOCK_LEN(mp);
  512. if (leaf)
  513. return blocklen / sizeof(xfs_bmbt_rec_t);
  514. return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
  515. }
  516. /*
  517. * Calculate number of records in a bmap btree inode root.
  518. */
  519. int
  520. xfs_bmdr_maxrecs(
  521. int blocklen,
  522. int leaf)
  523. {
  524. blocklen -= sizeof(xfs_bmdr_block_t);
  525. if (leaf)
  526. return blocklen / sizeof(xfs_bmdr_rec_t);
  527. return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
  528. }
/*
 * Change the owner of a btree format fork of the inode passed in. Change
 * it to the owner that is passed in so that we can change owners before
 * or after we switch forks between inodes. The operation that the caller
 * is doing will determine whether it needs to change owner before or
 * after the switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	/* exactly one of transaction and buffer list must be supplied */
	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	if (whichfork == XFS_DATA_FORK)
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
	else
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	if (!cur)
		return -ENOMEM;
	/* owners will not match until the change completes */
	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error);
	return error;
}
/*
 * Calculate the bmap btree size for some records.
 *
 * Thin wrapper around the generic btree size calculation, using the bmbt
 * per-level minimum record counts.
 */
unsigned long long
xfs_bmbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}