// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "dm-background-tracker"
/*
 * Tracks the background work (promotes/demotes/writebacks) that a cache
 * policy has generated, from the moment it is queued until it completes.
 */
struct background_tracker {
	unsigned int max_work;		/* cap on total outstanding work items */

	/* per-op counts of work queued or issued but not yet completed */
	atomic_t pending_promotes;
	atomic_t pending_writebacks;
	atomic_t pending_demotes;

	struct list_head issued;	/* work handed out to be performed */
	struct list_head queued;	/* work accepted but not yet issued */

	/* all outstanding work, keyed by origin block for dedup lookups */
	struct rb_root pending;
};
  19. struct kmem_cache *btracker_work_cache = NULL;
  20. struct background_tracker *btracker_create(unsigned int max_work)
  21. {
  22. struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
  23. if (!b) {
  24. DMERR("couldn't create background_tracker");
  25. return NULL;
  26. }
  27. b->max_work = max_work;
  28. atomic_set(&b->pending_promotes, 0);
  29. atomic_set(&b->pending_writebacks, 0);
  30. atomic_set(&b->pending_demotes, 0);
  31. INIT_LIST_HEAD(&b->issued);
  32. INIT_LIST_HEAD(&b->queued);
  33. b->pending = RB_ROOT;
  34. return b;
  35. }
  36. EXPORT_SYMBOL_GPL(btracker_create);
  37. void btracker_destroy(struct background_tracker *b)
  38. {
  39. struct bt_work *w, *tmp;
  40. BUG_ON(!list_empty(&b->issued));
  41. list_for_each_entry_safe (w, tmp, &b->queued, list) {
  42. list_del(&w->list);
  43. kmem_cache_free(btracker_work_cache, w);
  44. }
  45. kfree(b);
  46. }
  47. EXPORT_SYMBOL_GPL(btracker_destroy);
  48. static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
  49. {
  50. if (from_oblock(lhs) < from_oblock(rhs))
  51. return -1;
  52. if (from_oblock(rhs) < from_oblock(lhs))
  53. return 1;
  54. return 0;
  55. }
/*
 * Insert @nw into the rb-tree of outstanding work, keyed by origin
 * block.  Returns false (leaving the tree untouched) if work for the
 * same oblock is already present — callers treat that as a dedup hit.
 *
 * Descent convention: cmp < 0 walks rb_left.  __find_pending() uses the
 * same convention, so lookups agree with inserts; don't change one
 * without the other.
 */
static bool __insert_pending(struct background_tracker *b,
			     struct bt_work *nw)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node, *parent = NULL;

	while (*new) {
		w = container_of(*new, struct bt_work, node);
		parent = *new;

		cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			/* already present */
			return false;
	}

	/* link in at the leaf we fell off, then rebalance */
	rb_link_node(&nw->node, parent, new);
	rb_insert_color(&nw->node, &b->pending);

	return true;
}
  78. static struct bt_work *__find_pending(struct background_tracker *b,
  79. dm_oblock_t oblock)
  80. {
  81. int cmp;
  82. struct bt_work *w;
  83. struct rb_node **new = &b->pending.rb_node;
  84. while (*new) {
  85. w = container_of(*new, struct bt_work, node);
  86. cmp = cmp_oblock(w->work.oblock, oblock);
  87. if (cmp < 0)
  88. new = &((*new)->rb_left);
  89. else if (cmp > 0)
  90. new = &((*new)->rb_right);
  91. else
  92. break;
  93. }
  94. return *new ? w : NULL;
  95. }
  96. static void update_stats(struct background_tracker *b, struct policy_work *w, int delta)
  97. {
  98. switch (w->op) {
  99. case POLICY_PROMOTE:
  100. atomic_add(delta, &b->pending_promotes);
  101. break;
  102. case POLICY_DEMOTE:
  103. atomic_add(delta, &b->pending_demotes);
  104. break;
  105. case POLICY_WRITEBACK:
  106. atomic_add(delta, &b->pending_writebacks);
  107. break;
  108. }
  109. }
/* Number of writebacks queued or issued but not yet completed. */
unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
/* Number of demotions queued or issued but not yet completed. */
unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_demotes);
}
EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
/*
 * Has the total outstanding work hit b->max_work?  The three counters
 * are read individually, so the sum is only approximate under
 * concurrent updates; presumably callers serialise via a policy-level
 * lock — TODO confirm against the dm-cache policy code.
 */
static bool max_work_reached(struct background_tracker *b)
{
	return atomic_read(&b->pending_promotes) +
		atomic_read(&b->pending_writebacks) +
		atomic_read(&b->pending_demotes) >= b->max_work;
}
/*
 * Allocate a work item, or return NULL if the tracker is already at its
 * max_work limit.  GFP_NOWAIT: the allocation never sleeps — presumably
 * because callers hold a spinlock; confirm against the policy code.
 */
static struct bt_work *alloc_work(struct background_tracker *b)
{
	if (max_work_reached(b))
		return NULL;

	return kmem_cache_alloc(btracker_work_cache, GFP_NOWAIT);
}
/*
 * Queue a new piece of work.  @work is copied into a tracker-owned
 * bt_work, so the caller's copy need not outlive this call.
 *
 * If @pwork is non-NULL the caller wants the work back immediately: the
 * copy goes straight onto the issued list and *pwork points at it.
 * Otherwise the copy sits on the queued list until btracker_issue().
 *
 * Returns 0 on success, -ENOMEM if allocation failed or max_work was
 * reached, -EINVAL if work for the same oblock is already tracked.
 */
int btracker_queue(struct background_tracker *b,
		   struct policy_work *work,
		   struct policy_work **pwork)
{
	struct bt_work *w;

	if (pwork)
		*pwork = NULL;

	w = alloc_work(b);
	if (!w)
		return -ENOMEM;

	memcpy(&w->work, work, sizeof(*work));

	if (!__insert_pending(b, w)) {
		/*
		 * There was a race, we'll just ignore this second
		 * bit of work for the same oblock.
		 */
		kmem_cache_free(btracker_work_cache, w);
		return -EINVAL;
	}

	if (pwork) {
		*pwork = &w->work;
		list_add(&w->list, &b->issued);
	} else
		list_add(&w->list, &b->queued);
	update_stats(b, &w->work, 1);

	return 0;
}
EXPORT_SYMBOL_GPL(btracker_queue);
  160. /*
  161. * Returns -ENODATA if there's no work.
  162. */
  163. int btracker_issue(struct background_tracker *b, struct policy_work **work)
  164. {
  165. struct bt_work *w;
  166. if (list_empty(&b->queued))
  167. return -ENODATA;
  168. w = list_first_entry(&b->queued, struct bt_work, list);
  169. list_move(&w->list, &b->issued);
  170. *work = &w->work;
  171. return 0;
  172. }
  173. EXPORT_SYMBOL_GPL(btracker_issue);
/*
 * Called when a piece of work finishes.  @op must be a policy_work
 * previously handed out by this tracker (it is the member embedded in a
 * bt_work — recovered here via container_of).  Drops the stats, unlinks
 * the item from the rb-tree and its list, and frees it.
 */
void btracker_complete(struct background_tracker *b,
		       struct policy_work *op)
{
	struct bt_work *w = container_of(op, struct bt_work, work);

	update_stats(b, &w->work, -1);
	rb_erase(&w->node, &b->pending);
	list_del(&w->list);
	kmem_cache_free(btracker_work_cache, w);
}
EXPORT_SYMBOL_GPL(btracker_complete);
  184. bool btracker_promotion_already_present(struct background_tracker *b,
  185. dm_oblock_t oblock)
  186. {
  187. return __find_pending(b, oblock) != NULL;
  188. }
  189. EXPORT_SYMBOL_GPL(btracker_promotion_already_present);
  190. /*----------------------------------------------------------------*/