xfs_alloc_btree.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_log_format.h"
  11. #include "xfs_trans_resv.h"
  12. #include "xfs_sb.h"
  13. #include "xfs_mount.h"
  14. #include "xfs_btree.h"
  15. #include "xfs_alloc_btree.h"
  16. #include "xfs_alloc.h"
  17. #include "xfs_extent_busy.h"
  18. #include "xfs_error.h"
  19. #include "xfs_trace.h"
  20. #include "xfs_cksum.h"
  21. #include "xfs_trans.h"
  22. STATIC struct xfs_btree_cur *
  23. xfs_allocbt_dup_cursor(
  24. struct xfs_btree_cur *cur)
  25. {
  26. return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
  27. cur->bc_private.a.agbp, cur->bc_private.a.agno,
  28. cur->bc_btnum);
  29. }
/*
 * Point the AGF at a new root block for this btree and adjust the
 * recorded tree height by @inc.  The root pointer and level for both
 * free space btrees live in the on-disk AGF; the level is also mirrored
 * in the in-core perag.
 */
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	/* Block zero is never a valid btree root. */
	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	/* Keep the cached per-AG level in sync with the on-disk AGF. */
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	/* Log both changed AGF fields in the transaction. */
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
/*
 * Allocate a new btree block for this cursor from the AG freelist.
 * Sets *stat to 1 and fills in @new on success, or sets *stat to 0 if
 * the freelist is empty.  @start is unused here: freelist blocks come
 * from wherever the freelist has them.
 */
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		/* Freelist exhausted: report failure without an error code. */
		*stat = 0;
		return 0;
	}

	/* The block may still be marked busy from a prior free; clear that. */
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}
/*
 * Free a btree block back to the AG freelist and mark the extent busy
 * so it is not immediately reused for user data before the transaction
 * commits (discard is skipped for freelist blocks).
 */
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	/* Convert the buffer's daddr back to an AG-relative block number. */
	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			       XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}
/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	/* Only the by-size (cnt) btree tracks the longest free extent. */
	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		/* Inserted record only matters if it beats the current max. */
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		/* Only react if the record past the new end was removed. */
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			/* New longest is the last remaining record. */
			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	/* Update the on-disk AGF and the cached perag copy, then log it. */
	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
  146. STATIC int
  147. xfs_allocbt_get_minrecs(
  148. struct xfs_btree_cur *cur,
  149. int level)
  150. {
  151. return cur->bc_mp->m_alloc_mnr[level != 0];
  152. }
  153. STATIC int
  154. xfs_allocbt_get_maxrecs(
  155. struct xfs_btree_cur *cur,
  156. int level)
  157. {
  158. return cur->bc_mp->m_alloc_mxr[level != 0];
  159. }
  160. STATIC void
  161. xfs_allocbt_init_key_from_rec(
  162. union xfs_btree_key *key,
  163. union xfs_btree_rec *rec)
  164. {
  165. key->alloc.ar_startblock = rec->alloc.ar_startblock;
  166. key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
  167. }
  168. STATIC void
  169. xfs_bnobt_init_high_key_from_rec(
  170. union xfs_btree_key *key,
  171. union xfs_btree_rec *rec)
  172. {
  173. __u32 x;
  174. x = be32_to_cpu(rec->alloc.ar_startblock);
  175. x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
  176. key->alloc.ar_startblock = cpu_to_be32(x);
  177. key->alloc.ar_blockcount = 0;
  178. }
  179. STATIC void
  180. xfs_cntbt_init_high_key_from_rec(
  181. union xfs_btree_key *key,
  182. union xfs_btree_rec *rec)
  183. {
  184. key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
  185. key->alloc.ar_startblock = 0;
  186. }
  187. STATIC void
  188. xfs_allocbt_init_rec_from_cur(
  189. struct xfs_btree_cur *cur,
  190. union xfs_btree_rec *rec)
  191. {
  192. rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
  193. rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
  194. }
/*
 * Fetch the root block pointer for this btree from the AGF, which
 * holds one root slot per free space btree type.
 */
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	/* The AGF buffer must belong to the AG this cursor was set up for. */
	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
  204. STATIC int64_t
  205. xfs_bnobt_key_diff(
  206. struct xfs_btree_cur *cur,
  207. union xfs_btree_key *key)
  208. {
  209. xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
  210. xfs_alloc_key_t *kp = &key->alloc;
  211. return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
  212. }
  213. STATIC int64_t
  214. xfs_cntbt_key_diff(
  215. struct xfs_btree_cur *cur,
  216. union xfs_btree_key *key)
  217. {
  218. xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
  219. xfs_alloc_key_t *kp = &key->alloc;
  220. int64_t diff;
  221. diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
  222. if (diff)
  223. return diff;
  224. return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
  225. }
  226. STATIC int64_t
  227. xfs_bnobt_diff_two_keys(
  228. struct xfs_btree_cur *cur,
  229. union xfs_btree_key *k1,
  230. union xfs_btree_key *k2)
  231. {
  232. return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
  233. be32_to_cpu(k2->alloc.ar_startblock);
  234. }
  235. STATIC int64_t
  236. xfs_cntbt_diff_two_keys(
  237. struct xfs_btree_cur *cur,
  238. union xfs_btree_key *k1,
  239. union xfs_btree_key *k2)
  240. {
  241. int64_t diff;
  242. diff = be32_to_cpu(k1->alloc.ar_blockcount) -
  243. be32_to_cpu(k2->alloc.ar_blockcount);
  244. if (diff)
  245. return diff;
  246. return be32_to_cpu(k1->alloc.ar_startblock) -
  247. be32_to_cpu(k2->alloc.ar_startblock);
  248. }
/*
 * Structural verifier shared by both free space btrees.  Checks the
 * block magic, the v5 header where applicable, and that the recorded
 * level is sane; defers the common short-block checks to
 * xfs_btree_sblock_verify().  Returns NULL if the block is good, or
 * the address of the failing check otherwise.
 */
static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer. In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	level = be16_to_cpu(block->bb_level);
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
		/* v5 blocks carry extra header fields to verify first. */
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_ABTB_MAGIC):
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
				return __this_address;
		} else if (level >= mp->m_ag_maxlevels)
			return __this_address;
		break;
	case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_ABTC_MAGIC):
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
				return __this_address;
		} else if (level >= mp->m_ag_maxlevels)
			return __this_address;
		break;
	default:
		/* Unrecognized magic: not an alloc btree block at all. */
		return __this_address;
	}

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}
/*
 * Read verifier: check the CRC first, then the block structure.  Any
 * failure is recorded on the buffer via xfs_verifier_error() and
 * traced for debugging.
 */
static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	/* b_error is set by xfs_verifier_error() on either failure above. */
	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}
/*
 * Write verifier: refuse to write a structurally bad block, otherwise
 * stamp a fresh CRC before the buffer goes to disk.
 */
static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}
/* Buffer ops shared by both free space btrees (bnobt and cntbt). */
const struct xfs_buf_ops xfs_allocbt_buf_ops = {
	.name = "xfs_allocbt",
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};
  335. STATIC int
  336. xfs_bnobt_keys_inorder(
  337. struct xfs_btree_cur *cur,
  338. union xfs_btree_key *k1,
  339. union xfs_btree_key *k2)
  340. {
  341. return be32_to_cpu(k1->alloc.ar_startblock) <
  342. be32_to_cpu(k2->alloc.ar_startblock);
  343. }
  344. STATIC int
  345. xfs_bnobt_recs_inorder(
  346. struct xfs_btree_cur *cur,
  347. union xfs_btree_rec *r1,
  348. union xfs_btree_rec *r2)
  349. {
  350. return be32_to_cpu(r1->alloc.ar_startblock) +
  351. be32_to_cpu(r1->alloc.ar_blockcount) <=
  352. be32_to_cpu(r2->alloc.ar_startblock);
  353. }
  354. STATIC int
  355. xfs_cntbt_keys_inorder(
  356. struct xfs_btree_cur *cur,
  357. union xfs_btree_key *k1,
  358. union xfs_btree_key *k2)
  359. {
  360. return be32_to_cpu(k1->alloc.ar_blockcount) <
  361. be32_to_cpu(k2->alloc.ar_blockcount) ||
  362. (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
  363. be32_to_cpu(k1->alloc.ar_startblock) <
  364. be32_to_cpu(k2->alloc.ar_startblock));
  365. }
  366. STATIC int
  367. xfs_cntbt_recs_inorder(
  368. struct xfs_btree_cur *cur,
  369. union xfs_btree_rec *r1,
  370. union xfs_btree_rec *r2)
  371. {
  372. return be32_to_cpu(r1->alloc.ar_blockcount) <
  373. be32_to_cpu(r2->alloc.ar_blockcount) ||
  374. (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
  375. be32_to_cpu(r1->alloc.ar_startblock) <
  376. be32_to_cpu(r2->alloc.ar_startblock));
  377. }
/* Btree operations for the by-startblock (bno) free space btree. */
static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len = sizeof(xfs_alloc_rec_t),
	.key_len = sizeof(xfs_alloc_key_t),
	.dup_cursor = xfs_allocbt_dup_cursor,
	.set_root = xfs_allocbt_set_root,
	.alloc_block = xfs_allocbt_alloc_block,
	.free_block = xfs_allocbt_free_block,
	.update_lastrec = xfs_allocbt_update_lastrec,
	.get_minrecs = xfs_allocbt_get_minrecs,
	.get_maxrecs = xfs_allocbt_get_maxrecs,
	.init_key_from_rec = xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec = xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
	.key_diff = xfs_bnobt_key_diff,
	.buf_ops = &xfs_allocbt_buf_ops,
	.diff_two_keys = xfs_bnobt_diff_two_keys,
	.keys_inorder = xfs_bnobt_keys_inorder,
	.recs_inorder = xfs_bnobt_recs_inorder,
};
/* Btree operations for the by-size (cnt) free space btree. */
static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len = sizeof(xfs_alloc_rec_t),
	.key_len = sizeof(xfs_alloc_key_t),
	.dup_cursor = xfs_allocbt_dup_cursor,
	.set_root = xfs_allocbt_set_root,
	.alloc_block = xfs_allocbt_alloc_block,
	.free_block = xfs_allocbt_free_block,
	.update_lastrec = xfs_allocbt_update_lastrec,
	.get_minrecs = xfs_allocbt_get_minrecs,
	.get_maxrecs = xfs_allocbt_get_maxrecs,
	.init_key_from_rec = xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec = xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
	.key_diff = xfs_cntbt_key_diff,
	.buf_ops = &xfs_allocbt_buf_ops,
	.diff_two_keys = xfs_cntbt_diff_two_keys,
	.keys_inorder = xfs_cntbt_keys_inorder,
	.recs_inorder = xfs_cntbt_recs_inorder,
};
/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	/* Only the two free space btree types are valid here. */
	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_ops = &xfs_cntbt_ops;
		/* Current tree height comes from the AGF. */
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		/* The cnt btree maintains the longest-extent in its lastrec. */
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	/* v5 filesystems checksum btree blocks. */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}
  453. /*
  454. * Calculate number of records in an alloc btree block.
  455. */
  456. int
  457. xfs_allocbt_maxrecs(
  458. struct xfs_mount *mp,
  459. int blocklen,
  460. int leaf)
  461. {
  462. blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
  463. if (leaf)
  464. return blocklen / sizeof(xfs_alloc_rec_t);
  465. return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
  466. }
/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	/* Worst-case sizing uses the per-level minimum record counts. */
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}