// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfs/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/log2.h>

#include "btree.h"

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct folio *folio;
	struct buffer_head *bh;
	unsigned int size;
	u16 dblock;
	sector_t start_block;
	loff_t offset;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	/* Set the correct compare function */
	tree->sb = sb;
	tree->cnid = id;
	tree->keycmp = keycmp;

	tree->inode = iget_locked(sb, id);
	if (!tree->inode)
		goto free_tree;
	BUG_ON(!(tree->inode->i_state & I_NEW));
	{
	struct hfs_mdb *mdb = HFS_SB(sb)->mdb;

	HFS_I(tree->inode)->flags = 0;
	mutex_init(&HFS_I(tree->inode)->extents_lock);
	switch (id) {
	case HFS_EXT_CNID:
		hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
				    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
		if (HFS_I(tree->inode)->alloc_blocks >
		    HFS_I(tree->inode)->first_blocks) {
			pr_err("invalid btree extent records\n");
			unlock_new_inode(tree->inode);
			goto free_inode;
		}

		tree->inode->i_mapping->a_ops = &hfs_btree_aops;
		break;
	case HFS_CAT_CNID:
		hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
				    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));

		if (!HFS_I(tree->inode)->first_blocks) {
			pr_err("invalid btree extent records (0 size)\n");
			unlock_new_inode(tree->inode);
			goto free_inode;
		}

		tree->inode->i_mapping->a_ops = &hfs_btree_aops;
		break;
	default:
		BUG();
	}
	}
	unlock_new_inode(tree->inode);
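	/*
	 * Node 0 of the tree is the header node.  Pull the first folio of
	 * the B-tree inode's mapping in from disk one filesystem block at
	 * a time so the header record can be parsed below.
	 */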
	mapping = tree->inode->i_mapping;
	folio = filemap_grab_folio(mapping, 0);
	if (IS_ERR(folio))
		goto free_inode;

	folio_zero_range(folio, 0, folio_size(folio));

	dblock = hfs_ext_find_block(HFS_I(tree->inode)->first_extents, 0);
	start_block = HFS_SB(sb)->fs_start + (dblock * HFS_SB(sb)->fs_div);

	size = folio_size(folio);
	offset = 0;
	while (size > 0) {
		size_t len;

		bh = sb_bread(sb, start_block);
		if (!bh) {
			pr_err("unable to read tree header\n");
			goto put_folio;
		}

		len = min_t(size_t, folio_size(folio), sb->s_blocksize);
		memcpy_to_folio(folio, offset, bh->b_data, sb->s_blocksize);

		brelse(bh);

		start_block++;
		offset += len;
		size -= len;
	}

	folio_mark_uptodate(folio);
	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap_local_folio(folio, 0) +
					       sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);
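	/*
	 * Sanity-check the on-disk header before trusting it: the node size
	 * must be a power of two, the tree must contain at least one node,
	 * and the maximum key length must match the fixed value for the
	 * tree being opened.
	 */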
	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_folio;
	if (!tree->node_count)
		goto fail_folio;
	switch (id) {
	case HFS_EXT_CNID:
		if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
			pr_err("invalid extent max_key_len %d\n",
			       tree->max_key_len);
			goto fail_folio;
		}
		break;
	case HFS_CAT_CNID:
		if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
			pr_err("invalid catalog max_key_len %d\n",
			       tree->max_key_len);
			goto fail_folio;
		}
		break;
	default:
		BUG();
	}
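	/*
	 * node_size is a power of two, so ffs() - 1 yields its log2;
	 * pages_per_bnode is the node size rounded up to whole pages.
	 */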
	tree->node_size_shift = ffs(size) - 1;
	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	kunmap_local(head);
	folio_unlock(folio);
	folio_put(folio);
	return tree;

fail_folio:
	kunmap_local(head);
put_folio:
	folio_unlock(folio);
	folio_put(folio);
free_inode:
	tree->inode->i_mapping->a_ops = &hfs_aops;
	iput(tree->inode);
free_tree:
	kfree(tree);
	return NULL;
}
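/*
 * For reference, the mount path opens the two fixed trees roughly like
 * this (a sketch; the actual call sites live in fs/hfs/mdb.c):
 *
 *	HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
 *	HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
 */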
/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_err("node %d:%d still has %d user(s)!\n",
				       node->tree->cnid, node->this,
				       atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}
void hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
					       sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap_local(head);
	set_page_dirty(page);
	hfs_bnode_put(node);
}
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	if (!tree->free_nodes)
		panic("FIXME!!!");
	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
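	/*
	 * Lay out the single map record: the bitmap occupies bytes 14
	 * through node_size - 6, and the two u16 offsets at the end of the
	 * node point at the start of that record and at the free space
	 * behind it.  0x8000 sets the first bit, which corresponds to this
	 * map node itself.
	 */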
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
	struct inode *inode = tree->inode;
	u32 count;
	int res;

	while (tree->free_nodes < rsvd_nodes) {
		res = hfs_extend_file(inode);
		if (res)
			return res;
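		/*
		 * hfs_extend_file() added allocation blocks to the fork;
		 * propagate the new size to the inode and recompute how
		 * many tree nodes the file can now hold.
		 */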
		HFS_I(inode)->phys_size = inode->i_size =
				(loff_t)HFS_I(inode)->alloc_blocks *
				HFS_SB(tree->sb)->alloc_blksz;
		HFS_I(inode)->fs_blocks = inode->i_size >>
					  tree->sb->s_blocksize_bits;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes += count - tree->node_count;
		tree->node_count = count;
	}
	return 0;
}
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap_local_page(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;
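	/*
	 * Scan the map records byte by byte for a clear bit; each bit
	 * stands for one node of the tree file.  When a record is
	 * exhausted, follow node->next to the next map node, creating a
	 * new one if the chain ends.
	 */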
	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap_local(data);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree, idx);
					}
				}
			}
			if (++off >= PAGE_SIZE) {
				kunmap_local(data);
				data = kmap_local_page(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap_local(data);
		nidx = node->next;
		if (!nidx) {
			printk(KERN_DEBUG "create new bmap node...\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap_local_page(*pagep);
		off &= ~PAGE_MASK;
	}
}
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
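	/*
	 * Walk the chain of map nodes until we reach the record whose
	 * bitmap covers the node being freed, skipping len * 8 bits per
	 * map record along the way.
	 */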
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap_local_page(page);
	off &= ~PAGE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode %u(%d)\n",
			node->this, node->type);
		kunmap_local(data);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap_local(data);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}