// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_from_page(buf, *pagep, off, l);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_from_page(buf, *++pagep, 0, l);
	}
}
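
/* Read a big-endian u16 from the node and return it in CPU byte order */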
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;

	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}
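
/* Read a single byte from the node */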
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;

	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}
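
/*
 * Copy the key record at @off into @key. The on-disk key length is
 * used for leaf nodes, trees with variable-size index keys and the
 * attributes tree; fixed-size index keys use the tree's max_key_len.
 * An implausible length zeroes the buffer and logs an error instead.
 */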
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
		memset(key, 0, sizeof(hfsplus_btree_key));
		pr_err("hfsplus: Invalid key length: %d\n", key_len);
		return;
	}

	hfs_bnode_read(node, key, off, key_len);
}
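
/* Copy a range of bytes into the raw data of a node, dirtying each page */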
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_to_page(*pagep, off, buf, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_to_page(*++pagep, 0, buf, l);
		set_page_dirty(*pagep);
	}
}
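
/* Write a u16 to the node, converting it to big-endian on-disk order */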
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);

	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}
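
/* Zero a range of bytes in the raw data of a node */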
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memzero_page(*pagep, off, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memzero_page(*++pagep, 0, l);
		set_page_dirty(*pagep);
	}
}
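
/*
 * Copy a byte range from one node to another. If source and
 * destination share the same alignment within their pages, whole
 * chunks are copied with memcpy_page(); otherwise both sides are
 * mapped with kmap_local_page() and copied in pieces bounded by the
 * nearer page boundary.
 */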
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(src_node, src, len);
	len = check_and_correct_requested_length(dst_node, dst, len);

	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy_page(*dst_page, src, *src_page, src, l);
		set_page_dirty(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
			set_page_dirty(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			dst_ptr = kmap_local_page(*dst_page) + dst;
			src_ptr = kmap_local_page(*src_page) + src;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}
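
/*
 * Move a possibly overlapping byte range within a node. When the
 * destination lies above the source, the copy walks backwards from the
 * end of the range so overlapping data is preserved, memmove-style.
 */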
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	void *src_ptr, *dst_ptr;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(node, src, len);
	len = check_and_correct_requested_length(node, dst, len);

	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				dst_ptr = kmap_local_page(*dst_page);
				src_ptr = kmap_local_page(*src_page);
				memmove(dst_ptr, src_ptr, src);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			dst_ptr = kmap_local_page(*dst_page);
			src_ptr = kmap_local_page(*src_page);
			memmove(dst_ptr + src, src_ptr + src, len);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);

			dst_ptr = kmap_local_page(*dst_page) + src;
			src_ptr = kmap_local_page(*src_page) + src;
			memmove(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				dst_ptr = kmap_local_page(*++dst_page);
				src_ptr = kmap_local_page(*++src_page);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
			}
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src < PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}
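
/* Dump the node descriptor and record offset table to the debug log */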
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
			    node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}
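
/*
 * Unlink a node from its siblings' prev/next chain, updating the
 * on-disk pointers of the neighbours (and the tree's leaf head/tail
 * for leaf nodes), then mark the node deleted.
 */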
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
				offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
				offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}
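
/* Fold a node number into an index into the node hash table */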
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}
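
/*
 * Look up a cached node by cnid; returns NULL if it is not hashed.
 * Callers in this file take tree->hash_lock around the lookup.
 */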
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}
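
/*
 * Allocate a node, hash it and read in its backing pages. If another
 * thread hashes the same cnid first, the new allocation is dropped and
 * the existing node is returned once it leaves the NEW state. On a
 * page read failure the node is returned with HFS_BNODE_ERROR set.
 */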
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);

	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}
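
/*
 * Remove a node from the hash table; called with tree->hash_lock held
 * (see hfs_bnode_put()).
 */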
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);

	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
					 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}
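
/* Drop the page references of a node and free its in-memory structure */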
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}
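
/*
 * Create a brand-new node: unlike hfs_bnode_find(), nothing is read or
 * validated; the node's pages are simply zeroed and marked dirty.
 */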
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}
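
/* Take an extra reference on a node */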
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}
		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
	       volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}