btree.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * lib/btree.c - Simple In-memory B+Tree
  4. *
  5. * Copyright (c) 2007-2008 Joern Engel <joern@purestorage.com>
  6. * Bits and pieces stolen from Peter Zijlstra's code, which is
  7. * Copyright 2007, Red Hat Inc. Peter Zijlstra
  8. *
  9. * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
  10. *
  11. * A relatively simple B+Tree implementation. I have written it as a learning
  12. * exercise to understand how B+Trees work. Turned out to be useful as well.
  13. *
  14. * B+Trees can be used similar to Linux radix trees (which don't have anything
  15. * in common with textbook radix trees, beware). Prerequisite for them working
  16. * well is that access to a random tree node is much faster than a large number
  17. * of operations within each node.
  18. *
  19. * Disks have fulfilled the prerequisite for a long time. More recently DRAM
  20. * has gained similar properties, as memory access times, when measured in cpu
  21. * cycles, have increased. Cacheline sizes have increased as well, which also
  22. * helps B+Trees.
  23. *
  24. * Compared to radix trees, B+Trees are more efficient when dealing with a
  25. * sparsely populated address space. Between 25% and 50% of the memory is
  26. * occupied with valid pointers. When densely populated, radix trees contain
  27. * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2%
  28. * pointers.
  29. *
  30. * This particular implementation stores pointers identified by a long value.
  31. * Storing NULL pointers is illegal, lookup will return NULL when no entry
  32. * was found.
  33. *
  34. * A trick was used that is not commonly found in textbooks. The lowest
  35. * values are to the right, not to the left. All used slots within a node
  36. * are on the left, all unused slots contain NUL values. Most operations
  37. * simply loop once over all slots and terminate on the first NUL.
  38. */
  39. #include <linux/btree.h>
  40. #include <linux/cache.h>
  41. #include <linux/kernel.h>
  42. #include <linux/slab.h>
  43. #include <linux/module.h>
/* Node size: at least one cacheline, never smaller than 128 bytes. */
#define NODESIZE MAX(L1_CACHE_BYTES, 128)

/*
 * Geometry of one btree flavour: how a NODESIZE node is carved up into
 * keys and value pointers.
 */
struct btree_geo {
	int keylen;	/* length of one key, in longs */
	int no_pairs;	/* number of key/value pairs per node */
	int no_longs;	/* longs occupied by keys (keylen * no_pairs) */
};

/* Geometry for 32-bit keys (one long per key). */
struct btree_geo btree_geo32 = {
	.keylen = 1,
	.no_pairs = NODESIZE / sizeof(long) / 2,
	.no_longs = NODESIZE / sizeof(long) / 2,
};
EXPORT_SYMBOL_GPL(btree_geo32);

/* Longs needed to hold one u64 (1 on 64-bit, 2 on 32-bit). */
#define LONG_PER_U64 (64 / BITS_PER_LONG)

/* Geometry for 64-bit keys. */
struct btree_geo btree_geo64 = {
	.keylen = LONG_PER_U64,
	.no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
	.no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo64);

/* Geometry for 128-bit keys (two u64s). */
struct btree_geo btree_geo128 = {
	.keylen = 2 * LONG_PER_U64,
	.no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
	.no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo128);

/* Largest keylen of any supported geometry, for on-stack key buffers. */
#define MAX_KEYLEN (2 * LONG_PER_U64)

/* Slab cache backing all btree nodes; created at module init. */
static struct kmem_cache *btree_cachep;
/*
 * Mempool allocation callback: allocate one node from the slab cache.
 * @pool_data is unused.
 */
void *btree_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmem_cache_alloc(btree_cachep, gfp_mask);
}
EXPORT_SYMBOL_GPL(btree_alloc);
/*
 * Mempool free callback: return a node to the slab cache.
 * @pool_data is unused.
 */
void btree_free(void *element, void *pool_data)
{
	kmem_cache_free(btree_cachep, element);
}
EXPORT_SYMBOL_GPL(btree_free);
  81. static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
  82. {
  83. unsigned long *node;
  84. node = mempool_alloc(head->mempool, gfp);
  85. if (likely(node))
  86. memset(node, 0, NODESIZE);
  87. return node;
  88. }
  89. static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
  90. {
  91. size_t i;
  92. for (i = 0; i < n; i++) {
  93. if (l1[i] < l2[i])
  94. return -1;
  95. if (l1[i] > l2[i])
  96. return 1;
  97. }
  98. return 0;
  99. }
  100. static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
  101. size_t n)
  102. {
  103. size_t i;
  104. for (i = 0; i < n; i++)
  105. dest[i] = src[i];
  106. return dest;
  107. }
  108. static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
  109. {
  110. size_t i;
  111. for (i = 0; i < n; i++)
  112. s[i] = c;
  113. return s;
  114. }
  115. static void dec_key(struct btree_geo *geo, unsigned long *key)
  116. {
  117. unsigned long val;
  118. int i;
  119. for (i = geo->keylen - 1; i >= 0; i--) {
  120. val = key[i];
  121. key[i] = val - 1;
  122. if (val)
  123. break;
  124. }
  125. }
/* Address of the n-th key inside a node (keys fill the node's front). */
static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
{
	return &node[n * geo->keylen];
}
/* Value pointer stored in slot n; values follow the key area (no_longs). */
static void *bval(struct btree_geo *geo, unsigned long *node, int n)
{
	return (void *)node[geo->no_longs + n];
}
/* Store a key into slot n of a node. */
static void setkey(struct btree_geo *geo, unsigned long *node, int n,
		unsigned long *key)
{
	longcpy(bkey(geo, node, n), key, geo->keylen);
}
/* Store a value pointer into slot n of a node. */
static void setval(struct btree_geo *geo, unsigned long *node, int n,
		void *val)
{
	node[geo->no_longs + n] = (unsigned long) val;
}
/* Zero both the key and the value of slot n, marking it unused. */
static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
{
	longset(bkey(geo, node, n), 0, geo->keylen);
	node[geo->no_longs + n] = 0;
}
/* Reset a btree head to the empty state (no root, height 0). */
static inline void __btree_init(struct btree_head *head)
{
	head->node = NULL;
	head->height = 0;
}
/* Initialise a btree head using a caller-supplied mempool for nodes. */
void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
{
	__btree_init(head);
	head->mempool = mempool;
}
EXPORT_SYMBOL_GPL(btree_init_mempool);
/*
 * Initialise a btree head with its own mempool (min_nr 0, so node
 * allocations fall through to the slab cache).
 * Returns -ENOMEM if the mempool cannot be created, 0 on success.
 */
int btree_init(struct btree_head *head)
{
	__btree_init(head);
	head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
	if (!head->mempool)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(btree_init);
/*
 * Tear down a tree's storage: free the root node and destroy the mempool.
 * NOTE(review): only the root node is freed here — presumably the caller
 * must empty the tree first (e.g. via btree_grim_visitor); confirm.
 */
void btree_destroy(struct btree_head *head)
{
	mempool_free(head->node, head->mempool);
	mempool_destroy(head->mempool);
	head->mempool = NULL;
}
EXPORT_SYMBOL_GPL(btree_destroy);
  176. void *btree_last(struct btree_head *head, struct btree_geo *geo,
  177. unsigned long *key)
  178. {
  179. int height = head->height;
  180. unsigned long *node = head->node;
  181. if (height == 0)
  182. return NULL;
  183. for ( ; height > 1; height--)
  184. node = bval(geo, node, 0);
  185. longcpy(key, bkey(geo, node, 0), geo->keylen);
  186. return bval(geo, node, 0);
  187. }
  188. EXPORT_SYMBOL_GPL(btree_last);
/* Compare the key in slot pos of a node against *key (longcmp semantics). */
static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
		unsigned long *key)
{
	return longcmp(bkey(geo, node, pos), key, geo->keylen);
}
  194. static int keyzero(struct btree_geo *geo, unsigned long *key)
  195. {
  196. int i;
  197. for (i = 0; i < geo->keylen; i++)
  198. if (key[i])
  199. return 0;
  200. return 1;
  201. }
  202. static void *btree_lookup_node(struct btree_head *head, struct btree_geo *geo,
  203. unsigned long *key)
  204. {
  205. int i, height = head->height;
  206. unsigned long *node = head->node;
  207. if (height == 0)
  208. return NULL;
  209. for ( ; height > 1; height--) {
  210. for (i = 0; i < geo->no_pairs; i++)
  211. if (keycmp(geo, node, i, key) <= 0)
  212. break;
  213. if (i == geo->no_pairs)
  214. return NULL;
  215. node = bval(geo, node, i);
  216. if (!node)
  217. return NULL;
  218. }
  219. return node;
  220. }
  221. void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
  222. unsigned long *key)
  223. {
  224. int i;
  225. unsigned long *node;
  226. node = btree_lookup_node(head, geo, key);
  227. if (!node)
  228. return NULL;
  229. for (i = 0; i < geo->no_pairs; i++)
  230. if (keycmp(geo, node, i, key) == 0)
  231. return bval(geo, node, i);
  232. return NULL;
  233. }
  234. EXPORT_SYMBOL_GPL(btree_lookup);
  235. int btree_update(struct btree_head *head, struct btree_geo *geo,
  236. unsigned long *key, void *val)
  237. {
  238. int i;
  239. unsigned long *node;
  240. node = btree_lookup_node(head, geo, key);
  241. if (!node)
  242. return -ENOENT;
  243. for (i = 0; i < geo->no_pairs; i++)
  244. if (keycmp(geo, node, i, key) == 0) {
  245. setval(geo, node, i, val);
  246. return 0;
  247. }
  248. return -ENOENT;
  249. }
  250. EXPORT_SYMBOL_GPL(btree_update);
/*
 * Usually this function is quite similar to normal lookup. But the key of
 * a parent node may be smaller than the smallest key of all its siblings.
 * In such a case we cannot just return NULL, as we have only proven that no
 * key smaller than __key, but larger than this parent key exists.
 * So we set __key to the parent key and retry. We have to use the smallest
 * such parent key, which is the last parent key we encountered.
 */
void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
		unsigned long *__key)
{
	int i, height;
	unsigned long *node, *oldnode;
	unsigned long *retry_key = NULL, key[MAX_KEYLEN];

	/* Nothing can be smaller than an all-zero key. */
	if (keyzero(geo, __key))
		return NULL;
	if (head->height == 0)
		return NULL;
	longcpy(key, __key, geo->keylen);
retry:
	/* Search for the largest entry strictly below __key. */
	dec_key(geo, key);

	node = head->node;
	for (height = head->height ; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			goto miss;
		oldnode = node;
		node = bval(geo, node, i);
		if (!node)
			goto miss;
		/* Remember the parent key we descended through for retry. */
		retry_key = bkey(geo, oldnode, i);
	}

	if (!node)
		goto miss;

	for (i = 0; i < geo->no_pairs; i++) {
		if (keycmp(geo, node, i, key) <= 0) {
			/* Unused slots hold NULL values, hence the check. */
			if (bval(geo, node, i)) {
				longcpy(__key, bkey(geo, node, i), geo->keylen);
				return bval(geo, node, i);
			} else
				goto miss;
		}
	}
miss:
	if (retry_key) {
		longcpy(key, retry_key, geo->keylen);
		retry_key = NULL;
		goto retry;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(btree_get_prev);
  305. static int getpos(struct btree_geo *geo, unsigned long *node,
  306. unsigned long *key)
  307. {
  308. int i;
  309. for (i = 0; i < geo->no_pairs; i++) {
  310. if (keycmp(geo, node, i, key) <= 0)
  311. break;
  312. }
  313. return i;
  314. }
  315. static int getfill(struct btree_geo *geo, unsigned long *node, int start)
  316. {
  317. int i;
  318. for (i = start; i < geo->no_pairs; i++)
  319. if (!bval(geo, node, i))
  320. break;
  321. return i;
  322. }
/*
 * Locate the node at "level" that covers *key, walking down from the
 * root and fixing up right-most keys where necessary on the way.
 */
static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level)
{
	unsigned long *node = head->node;
	int i, height;

	for (height = head->height; height > level; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;

		if ((i == geo->no_pairs) || !bval(geo, node, i)) {
			/* right-most key is too large, update it */
			/* FIXME: If the right-most key on higher levels is
			 * always zero, this wouldn't be necessary. */
			i--;
			setkey(geo, node, i, key);
		}
		BUG_ON(i < 0);
		node = bval(geo, node, i);
	}
	BUG_ON(!node);
	return node;
}
/*
 * Add a new root level: allocate a node and make the old root its single
 * child.  Returns -ENOMEM if no node could be allocated.
 */
static int btree_grow(struct btree_head *head, struct btree_geo *geo,
		gfp_t gfp)
{
	unsigned long *node;
	int fill;

	node = btree_node_alloc(head, gfp);
	if (!node)
		return -ENOMEM;
	if (head->node) {
		/* Key for the old root is its smallest (last used) key. */
		fill = getfill(geo, head->node, 0);
		setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
		setval(geo, node, 0, head->node);
	}
	head->node = node;
	head->height++;
	return 0;
}
/*
 * Drop the root level when the root holds at most one child; that child
 * becomes the new root.  No-op for trees of height <= 1.
 */
static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
{
	unsigned long *node;
	int fill;

	if (head->height <= 1)
		return;

	node = head->node;
	fill = getfill(geo, node, 0);
	BUG_ON(fill > 1);
	head->node = bval(geo, node, 0);
	head->height--;
	mempool_free(node, head->mempool);
}
/*
 * Insert (key, val) into the node at "level", growing the tree and
 * splitting full nodes on the way as needed.  Duplicate keys are a bug.
 * Returns 0 on success or -ENOMEM if a node allocation failed.
 */
static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, void *val, int level,
		gfp_t gfp)
{
	unsigned long *node;
	int i, pos, fill, err;

	BUG_ON(!val);
	if (head->height < level) {
		err = btree_grow(head, geo, gfp);
		if (err)
			return err;
	}

retry:
	node = find_level(head, geo, key, level);
	pos = getpos(geo, node, key);
	fill = getfill(geo, node, pos);
	/* two identical keys are not allowed */
	BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);

	if (fill == geo->no_pairs) {
		/* need to split node */
		unsigned long *new;

		new = btree_node_alloc(head, gfp);
		if (!new)
			return -ENOMEM;
		/* Link the new sibling into the parent first. */
		err = btree_insert_level(head, geo,
				bkey(geo, node, fill / 2 - 1),
				new, level + 1, gfp);
		if (err) {
			mempool_free(new, head->mempool);
			return err;
		}
		/* Move the larger half into "new", compact "node". */
		for (i = 0; i < fill / 2; i++) {
			setkey(geo, new, i, bkey(geo, node, i));
			setval(geo, new, i, bval(geo, node, i));
			setkey(geo, node, i, bkey(geo, node, i + fill / 2));
			setval(geo, node, i, bval(geo, node, i + fill / 2));
			clearpair(geo, node, i + fill / 2);
		}
		if (fill & 1) {
			/* Odd fill: one leftover entry slides down. */
			setkey(geo, node, i, bkey(geo, node, fill - 1));
			setval(geo, node, i, bval(geo, node, fill - 1));
			clearpair(geo, node, fill - 1);
		}
		goto retry;
	}
	BUG_ON(fill >= geo->no_pairs);

	/* shift and insert */
	for (i = fill; i > pos; i--) {
		setkey(geo, node, i, bkey(geo, node, i - 1));
		setval(geo, node, i, bval(geo, node, i - 1));
	}
	setkey(geo, node, pos, key);
	setval(geo, node, pos, val);
	return 0;
}
/*
 * Insert an entry into the tree.  @val must not be NULL (NULL marks
 * unused slots).  Returns 0 or -ENOMEM.
 */
int btree_insert(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, void *val, gfp_t gfp)
{
	BUG_ON(!val);
	return btree_insert_level(head, geo, key, val, 1, gfp);
}
EXPORT_SYMBOL_GPL(btree_insert);
static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level);

/*
 * Merge the rfill entries of "right" into "left" (which has lfill used
 * slots) and remove the emptied "right" node, freeing it.  lpos is the
 * slot of "left" within "parent".
 */
static void merge(struct btree_head *head, struct btree_geo *geo, int level,
		unsigned long *left, int lfill,
		unsigned long *right, int rfill,
		unsigned long *parent, int lpos)
{
	int i;

	for (i = 0; i < rfill; i++) {
		/* Move all keys to the left */
		setkey(geo, left, lfill + i, bkey(geo, right, i));
		setval(geo, left, lfill + i, bval(geo, right, i));
	}
	/* Exchange left and right child in parent */
	setval(geo, parent, lpos, right);
	setval(geo, parent, lpos + 1, left);
	/* Remove left (formerly right) child from parent */
	btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
	mempool_free(right, head->mempool);
}
/*
 * Called after a removal left "child" (found via "key" at "level") less
 * than half full: merge it with a neighbouring sibling if their combined
 * fill still fits in one node, or drop it entirely when it is empty.
 */
static void rebalance(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level, unsigned long *child, int fill)
{
	unsigned long *parent, *left = NULL, *right = NULL;
	int i, no_left, no_right;

	if (fill == 0) {
		/* Because we don't steal entries from a neighbour, this case
		 * can happen. Parent node contains a single child, this
		 * node, so merging with a sibling never happens.
		 */
		btree_remove_level(head, geo, key, level + 1);
		mempool_free(child, head->mempool);
		return;
	}

	parent = find_level(head, geo, key, level + 1);
	i = getpos(geo, parent, key);
	BUG_ON(bval(geo, parent, i) != child);

	/* Try to merge with the left neighbour first... */
	if (i > 0) {
		left = bval(geo, parent, i - 1);
		no_left = getfill(geo, left, 0);
		if (fill + no_left <= geo->no_pairs) {
			merge(head, geo, level,
					left, no_left,
					child, fill,
					parent, i - 1);
			return;
		}
	}
	/* ...then with the right neighbour. */
	if (i + 1 < getfill(geo, parent, i)) {
		right = bval(geo, parent, i + 1);
		no_right = getfill(geo, right, 0);
		if (fill + no_right <= geo->no_pairs) {
			merge(head, geo, level,
					child, fill,
					right, no_right,
					parent, i);
			return;
		}
	}
	/*
	 * We could also try to steal one entry from the left or right
	 * neighbor. By not doing so we changed the invariant from
	 * "all nodes are at least half full" to "no two neighboring
	 * nodes can be merged". Which means that the average fill of
	 * all nodes is still half or better.
	 */
}
/*
 * Remove the entry for *key at "level" and return its value.  Past the
 * root the tree is reset to empty.  Rebalances or shrinks the tree when
 * the node drops below half full.
 */
static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level)
{
	unsigned long *node;
	int i, pos, fill;
	void *ret;

	if (level > head->height) {
		/* we recursed all the way up */
		head->height = 0;
		head->node = NULL;
		return NULL;
	}

	node = find_level(head, geo, key, level);
	pos = getpos(geo, node, key);
	fill = getfill(geo, node, pos);
	/* On the leaf level the key must match exactly. */
	if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
		return NULL;
	ret = bval(geo, node, pos);

	/* remove and shift */
	for (i = pos; i < fill - 1; i++) {
		setkey(geo, node, i, bkey(geo, node, i + 1));
		setval(geo, node, i, bval(geo, node, i + 1));
	}
	clearpair(geo, node, fill - 1);

	if (fill - 1 < geo->no_pairs / 2) {
		if (level < head->height)
			rebalance(head, geo, key, level, node, fill - 1);
		else if (fill - 1 == 1)
			btree_shrink(head, geo);
	}

	return ret;
}
/*
 * Remove an entry from the tree.  Returns the removed value, or NULL if
 * the tree is empty or the key was not found.
 */
void *btree_remove(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	if (head->height == 0)
		return NULL;

	return btree_remove_level(head, geo, key, 1);
}
EXPORT_SYMBOL_GPL(btree_remove);
/*
 * Move every entry of @victim into @target; both trees share @geo.  The
 * trees must not contain a common key (btree_insert forbids duplicates).
 * On success @victim is empty.  Returns 0 or an insert error; on error
 * some entries may already have been moved.
 */
int btree_merge(struct btree_head *target, struct btree_head *victim,
		struct btree_geo *geo, gfp_t gfp)
{
	unsigned long key[MAX_KEYLEN];
	unsigned long dup[MAX_KEYLEN];
	void *val;
	int err;

	BUG_ON(target == victim);

	if (!(target->node)) {
		/* target is empty, just copy fields over */
		target->node = victim->node;
		target->height = victim->height;
		__btree_init(victim);
		return 0;
	}

	/* TODO: This needs some optimizations. Currently we do three tree
	 * walks to remove a single object from the victim.
	 */
	for (;;) {
		if (!btree_last(victim, geo, key))
			break;
		val = btree_lookup(victim, geo, key);
		err = btree_insert(target, geo, key, val, gfp);
		if (err)
			return err;
		/* We must make a copy of the key, as the original will get
		 * mangled inside btree_remove. */
		longcpy(dup, key, geo->keylen);
		btree_remove(victim, geo, dup);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(btree_merge);
/*
 * Recursively visit every element below "node", invoking func(elem,
 * opaque, key, index, func2) on each leaf entry.  "count" accumulates
 * the element index across the recursion and is returned.  When "reap"
 * is set, each visited node is freed after its children were handled.
 */
static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
		unsigned long *node, unsigned long opaque,
		void (*func)(void *elem, unsigned long opaque,
			unsigned long *key, size_t index,
			void *func2),
		void *func2, int reap, int height, size_t count)
{
	int i;
	unsigned long *child;

	for (i = 0; i < geo->no_pairs; i++) {
		child = bval(geo, node, i);
		if (!child)
			break;
		if (height > 1)
			count = __btree_for_each(head, geo, child, opaque,
					func, func2, reap, height - 1, count);
		else
			func(child, opaque, bkey(geo, node, i), count++,
					func2);
	}
	if (reap)
		mempool_free(node, head->mempool);
	return count;
}
/* No-op visitor, used when the caller supplies no callback. */
static void empty(void *elem, unsigned long opaque, unsigned long *key,
		size_t index, void *func2)
{
}
/* Adapter: unpack a single-long key and call a visitorl_t callback. */
void visitorl(void *elem, unsigned long opaque, unsigned long *key,
		size_t index, void *__func)
{
	visitorl_t func = __func;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitorl);
/* Adapter: reinterpret the key as u32 and call a visitor32_t callback. */
void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
		size_t index, void *__func)
{
	visitor32_t func = __func;
	u32 *key = (void *)__key;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor32);
/* Adapter: reinterpret the key as u64 and call a visitor64_t callback. */
void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
		size_t index, void *__func)
{
	visitor64_t func = __func;
	u64 *key = (void *)__key;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor64);
/* Adapter: split the key into two u64s and call a visitor128_t callback. */
void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
		size_t index, void *__func)
{
	visitor128_t func = __func;
	u64 *key = (void *)__key;

	func(elem, opaque, key[0], key[1], index);
}
EXPORT_SYMBOL_GPL(visitor128);
/*
 * Iterate over all elements in the tree, calling func for each one.
 * A NULL func2 selects the no-op visitor, so only elements are counted.
 * Returns the number of elements visited.
 */
size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
		unsigned long opaque,
		void (*func)(void *elem, unsigned long opaque,
			unsigned long *key,
			size_t index, void *func2),
		void *func2)
{
	size_t count = 0;

	if (!func2)
		func = empty;
	if (head->node)
		count = __btree_for_each(head, geo, head->node, opaque, func,
				func2, 0, head->height, 0);
	return count;
}
EXPORT_SYMBOL_GPL(btree_visitor);
/*
 * Like btree_visitor, but additionally frees every node (reap=1) and
 * reinitialises the head, leaving the tree empty afterwards.
 * Returns the number of elements visited.
 */
size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
		unsigned long opaque,
		void (*func)(void *elem, unsigned long opaque,
			unsigned long *key,
			size_t index, void *func2),
		void *func2)
{
	size_t count = 0;

	if (!func2)
		func = empty;
	if (head->node)
		count = __btree_for_each(head, geo, head->node, opaque, func,
				func2, 1, head->height, 0);
	__btree_init(head);
	return count;
}
EXPORT_SYMBOL_GPL(btree_grim_visitor);
  672. static int __init btree_module_init(void)
  673. {
  674. btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
  675. SLAB_HWCACHE_ALIGN, NULL);
  676. return 0;
  677. }
/* Destroy the node slab cache on module unload. */
static void __exit btree_module_exit(void)
{
	kmem_cache_destroy(btree_cachep);
}
  682. /* If core code starts using btree, initialization should happen even earlier */
  683. module_init(btree_module_init);
  684. module_exit(btree_module_exit);
  685. MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
  686. MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");