/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 */
  18. #include "pblk.h"
  19. static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
  20. {
  21. mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
  22. }
  23. int pblk_rl_is_limit(struct pblk_rl *rl)
  24. {
  25. int rb_space;
  26. rb_space = atomic_read(&rl->rb_space);
  27. return (rb_space == 0);
  28. }
  29. int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
  30. {
  31. int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
  32. int rb_space = atomic_read(&rl->rb_space);
  33. if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
  34. return NVM_IO_ERR;
  35. if (rb_user_cnt >= rl->rb_user_max)
  36. return NVM_IO_REQUEUE;
  37. return NVM_IO_OK;
  38. }
  39. void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
  40. {
  41. int rb_space = atomic_read(&rl->rb_space);
  42. if (unlikely(rb_space >= 0))
  43. atomic_sub(nr_entries, &rl->rb_space);
  44. }
  45. int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
  46. {
  47. int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
  48. int rb_user_active;
  49. /* If there is no user I/O let GC take over space on the write buffer */
  50. rb_user_active = READ_ONCE(rl->rb_user_active);
  51. return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
  52. }
  53. void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
  54. {
  55. atomic_add(nr_entries, &rl->rb_user_cnt);
  56. /* Release user I/O state. Protect from GC */
  57. smp_store_release(&rl->rb_user_active, 1);
  58. pblk_rl_kick_u_timer(rl);
  59. }
  60. void pblk_rl_werr_line_in(struct pblk_rl *rl)
  61. {
  62. atomic_inc(&rl->werr_lines);
  63. }
  64. void pblk_rl_werr_line_out(struct pblk_rl *rl)
  65. {
  66. atomic_dec(&rl->werr_lines);
  67. }
  68. void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
  69. {
  70. atomic_add(nr_entries, &rl->rb_gc_cnt);
  71. }
  72. void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
  73. {
  74. atomic_sub(nr_user, &rl->rb_user_cnt);
  75. atomic_sub(nr_gc, &rl->rb_gc_cnt);
  76. }
  77. unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
  78. {
  79. return atomic_read(&rl->free_blocks);
  80. }
  81. unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
  82. {
  83. return atomic_read(&rl->free_user_blocks);
  84. }
  85. static void __pblk_rl_update_rates(struct pblk_rl *rl,
  86. unsigned long free_blocks)
  87. {
  88. struct pblk *pblk = container_of(rl, struct pblk, rl);
  89. int max = rl->rb_budget;
  90. int werr_gc_needed = atomic_read(&rl->werr_lines);
  91. if (free_blocks >= rl->high) {
  92. if (werr_gc_needed) {
  93. /* Allocate a small budget for recovering
  94. * lines with write errors
  95. */
  96. rl->rb_gc_max = 1 << rl->rb_windows_pw;
  97. rl->rb_user_max = max - rl->rb_gc_max;
  98. rl->rb_state = PBLK_RL_WERR;
  99. } else {
  100. rl->rb_user_max = max;
  101. rl->rb_gc_max = 0;
  102. rl->rb_state = PBLK_RL_OFF;
  103. }
  104. } else if (free_blocks < rl->high) {
  105. int shift = rl->high_pw - rl->rb_windows_pw;
  106. int user_windows = free_blocks >> shift;
  107. int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;
  108. rl->rb_user_max = user_max;
  109. rl->rb_gc_max = max - user_max;
  110. if (free_blocks <= rl->rsv_blocks) {
  111. rl->rb_user_max = 0;
  112. rl->rb_gc_max = max;
  113. }
  114. /* In the worst case, we will need to GC lines in the low list
  115. * (high valid sector count). If there are lines to GC on high
  116. * or mid lists, these will be prioritized
  117. */
  118. rl->rb_state = PBLK_RL_LOW;
  119. }
  120. if (rl->rb_state != PBLK_RL_OFF)
  121. pblk_gc_should_start(pblk);
  122. else
  123. pblk_gc_should_stop(pblk);
  124. }
  125. void pblk_rl_update_rates(struct pblk_rl *rl)
  126. {
  127. __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
  128. }
  129. void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
  130. {
  131. int blk_in_line = atomic_read(&line->blk_in_line);
  132. int free_blocks;
  133. atomic_add(blk_in_line, &rl->free_blocks);
  134. free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
  135. __pblk_rl_update_rates(rl, free_blocks);
  136. }
  137. void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
  138. bool used)
  139. {
  140. int blk_in_line = atomic_read(&line->blk_in_line);
  141. int free_blocks;
  142. atomic_sub(blk_in_line, &rl->free_blocks);
  143. if (used)
  144. free_blocks = atomic_sub_return(blk_in_line,
  145. &rl->free_user_blocks);
  146. else
  147. free_blocks = atomic_read(&rl->free_user_blocks);
  148. __pblk_rl_update_rates(rl, free_blocks);
  149. }
  150. int pblk_rl_high_thrs(struct pblk_rl *rl)
  151. {
  152. return rl->high;
  153. }
  154. int pblk_rl_max_io(struct pblk_rl *rl)
  155. {
  156. return rl->rb_max_io;
  157. }
  158. static void pblk_rl_u_timer(struct timer_list *t)
  159. {
  160. struct pblk_rl *rl = from_timer(rl, t, u_timer);
  161. /* Release user I/O state. Protect from GC */
  162. smp_store_release(&rl->rb_user_active, 0);
  163. }
  164. void pblk_rl_free(struct pblk_rl *rl)
  165. {
  166. del_timer(&rl->u_timer);
  167. }
  168. void pblk_rl_init(struct pblk_rl *rl, int budget)
  169. {
  170. struct pblk *pblk = container_of(rl, struct pblk, rl);
  171. struct nvm_tgt_dev *dev = pblk->dev;
  172. struct nvm_geo *geo = &dev->geo;
  173. struct pblk_line_mgmt *l_mg = &pblk->l_mg;
  174. struct pblk_line_meta *lm = &pblk->lm;
  175. int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
  176. int sec_meta, blk_meta;
  177. unsigned int rb_windows;
  178. /* Consider sectors used for metadata */
  179. sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
  180. blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
  181. rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
  182. rl->high_pw = get_count_order(rl->high);
  183. rl->rsv_blocks = min_blocks;
  184. /* This will always be a power-of-2 */
  185. rb_windows = budget / PBLK_MAX_REQ_ADDRS;
  186. rl->rb_windows_pw = get_count_order(rb_windows);
  187. /* To start with, all buffer is available to user I/O writers */
  188. rl->rb_budget = budget;
  189. rl->rb_user_max = budget;
  190. rl->rb_max_io = budget >> 1;
  191. rl->rb_gc_max = 0;
  192. rl->rb_state = PBLK_RL_HIGH;
  193. atomic_set(&rl->rb_user_cnt, 0);
  194. atomic_set(&rl->rb_gc_cnt, 0);
  195. atomic_set(&rl->rb_space, -1);
  196. atomic_set(&rl->werr_lines, 0);
  197. timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
  198. rl->rb_user_active = 0;
  199. rl->rb_gc_active = 0;
  200. }