zutil.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "internal.h"

struct z_erofs_gbuf {
	spinlock_t lock;
	void *ptr;
	struct page **pages;
	unsigned int nrpages;
};

static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
		z_erofs_rsv_nrpages;

module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);

atomic_long_t erofs_global_shrink_cnt;	/* for all mounted instances */

/* protects the mounted `erofs_sb_list` */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);
static unsigned int shrinker_run_no;
static struct shrinker *erofs_shrinker_info;
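
/*
 * Map the current CPU to one of the global buffers.  This is only a
 * load-spreading heuristic; callers disable migration first so the id
 * stays stable until the buffer lock is taken.
 */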
static unsigned int z_erofs_gbuf_id(void)
{
	return raw_smp_processor_id() % z_erofs_gbuf_count;
}
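
/*
 * Grab the per-CPU global buffer and return its vmapped address, or NULL
 * if it currently holds fewer than @requiredpages pages.  On success the
 * buffer lock is held (and migration stays disabled) until the matching
 * z_erofs_put_gbuf() call.
 */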
void *z_erofs_get_gbuf(unsigned int requiredpages)
	__acquires(gbuf->lock)
{
	struct z_erofs_gbuf *gbuf;

	migrate_disable();
	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
	spin_lock(&gbuf->lock);
	/* check if the buffer is too small */
	if (requiredpages > gbuf->nrpages) {
		spin_unlock(&gbuf->lock);
		migrate_enable();
		/* (for sparse checker) pretend gbuf->lock is still taken */
		__acquire(gbuf->lock);
		return NULL;
	}
	return gbuf->ptr;
}
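
/*
 * A typical caller pattern might look as follows (a sketch only; real
 * callers such as the decompressors add their own fallback paths):
 *
 *	dst = z_erofs_get_gbuf(nrpages);
 *	if (dst) {
 *		... use up to nrpages << PAGE_SHIFT bytes at dst ...
 *		z_erofs_put_gbuf(dst);
 *	} else {
 *		... the buffer is too small: use a private allocation ...
 *	}
 */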

void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
{
	struct z_erofs_gbuf *gbuf;

	gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
	DBG_BUGON(gbuf->ptr != ptr);
	spin_unlock(&gbuf->lock);
	migrate_enable();
}
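
/*
 * Grow every global buffer so it can hold @nrpages pages.  Buffers are
 * never shrunk, since there is no way to tell how many mounted instances
 * still rely on the current size.  Existing pages are reused; only the
 * vmapped address is swapped out under the buffer lock.
 */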
int z_erofs_gbuf_growsize(unsigned int nrpages)
{
	static DEFINE_MUTEX(gbuf_resize_mutex);
	struct page **tmp_pages = NULL;
	struct z_erofs_gbuf *gbuf;
	void *ptr, *old_ptr;
	int last, i, j;

	mutex_lock(&gbuf_resize_mutex);
	/* avoid shrinking gbufs: no idea how many fses rely on the current size */
	if (nrpages <= z_erofs_gbuf_nrpages) {
		mutex_unlock(&gbuf_resize_mutex);
		return 0;
	}

	for (i = 0; i < z_erofs_gbuf_count; ++i) {
		gbuf = &z_erofs_gbufpool[i];
		tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
		if (!tmp_pages)
			goto out;

		for (j = 0; j < gbuf->nrpages; ++j)
			tmp_pages[j] = gbuf->pages[j];
		do {
			last = j;
			j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
						   tmp_pages);
			if (last == j)
				goto out;
		} while (j != nrpages);

		ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
		if (!ptr)
			goto out;

		spin_lock(&gbuf->lock);
		kfree(gbuf->pages);
		gbuf->pages = tmp_pages;
		old_ptr = gbuf->ptr;
		gbuf->ptr = ptr;
		gbuf->nrpages = nrpages;
		spin_unlock(&gbuf->lock);
		if (old_ptr)
			vunmap(old_ptr);
	}
	z_erofs_gbuf_nrpages = nrpages;
out:
	if (i < z_erofs_gbuf_count && tmp_pages) {
		for (j = 0; j < nrpages; ++j)
			if (tmp_pages[j] && (j >= gbuf->nrpages ||
					     tmp_pages[j] != gbuf->pages[j]))
				__free_page(tmp_pages[j]);
		kfree(tmp_pages);
	}
	mutex_unlock(&gbuf_resize_mutex);
	return i < z_erofs_gbuf_count ? -ENOMEM : 0;
}
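
/*
 * Set up one global buffer per possible CPU (capped by the
 * `global_buffers` module parameter), plus one trailing reserved buffer
 * if `reserved_pages` is non-zero.  Only bookkeeping is allocated here:
 * gbuf pages arrive via z_erofs_gbuf_growsize(), reserved pages as they
 * are recycled by erofs_release_pages().
 */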
int __init z_erofs_gbuf_init(void)
{
	unsigned int i, total = num_possible_cpus();

	if (z_erofs_gbuf_count)
		total = min(z_erofs_gbuf_count, total);
	z_erofs_gbuf_count = total;

	/* The last (special) global buffer is the reserved buffer */
	total += !!z_erofs_rsv_nrpages;
	z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
				   GFP_KERNEL);
	if (!z_erofs_gbufpool)
		return -ENOMEM;

	if (z_erofs_rsv_nrpages) {
		z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
		z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
				sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
		if (!z_erofs_rsvbuf->pages) {
			z_erofs_rsvbuf = NULL;
			z_erofs_rsv_nrpages = 0;
		}
	}
	for (i = 0; i < total; ++i)
		spin_lock_init(&z_erofs_gbufpool[i].lock);
	return 0;
}
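
/* Unmap and free all global buffers, including the reserved one, if any */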
void z_erofs_gbuf_exit(void)
{
	int i, j;

	for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
		struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];

		if (gbuf->ptr) {
			vunmap(gbuf->ptr);
			gbuf->ptr = NULL;
		}
		if (!gbuf->pages)
			continue;

		for (j = 0; j < gbuf->nrpages; ++j)
			if (gbuf->pages[j])
				put_page(gbuf->pages[j]);
		kfree(gbuf->pages);
		gbuf->pages = NULL;
	}
	kfree(z_erofs_gbufpool);
}
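
/*
 * Allocate one page: try the caller's local pagepool first (a singly
 * linked list threaded through page_private()), then the reserved global
 * pool if @tryrsv, and finally fall back to alloc_page().
 */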
struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
{
	struct page *page = *pagepool;

	if (page) {
		*pagepool = (struct page *)page_private(page);
	} else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
		spin_lock(&z_erofs_rsvbuf->lock);
		if (z_erofs_rsvbuf->nrpages)
			page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
		spin_unlock(&z_erofs_rsvbuf->lock);
	}
	if (!page)
		page = alloc_page(gfp);
	DBG_BUGON(page && page_ref_count(page) != 1);
	return page;
}
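
/*
 * Drain a local pagepool: refill the reserved global pool first (the
 * capacity check is redone under the lock), then hand the rest back to
 * the page allocator.
 */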
void erofs_release_pages(struct page **pagepool)
{
	while (*pagepool) {
		struct page *page = *pagepool;

		*pagepool = (struct page *)page_private(page);
		/* try to fill reserved global pool first */
		if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
				z_erofs_rsv_nrpages) {
			spin_lock(&z_erofs_rsvbuf->lock);
			if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
				z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
						= page;
				spin_unlock(&z_erofs_rsvbuf->lock);
				continue;
			}
			spin_unlock(&z_erofs_rsvbuf->lock);
		}
		put_page(page);
	}
}
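
/* Add a mounted instance to the list considered by the shrinker */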
void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}
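
/*
 * Called on umount: reclaim all managed pclusters of this instance
 * before taking it off the shrinker list.  Holding umount_mutex keeps
 * erofs_shrink_scan() away while the instance is being torn down.
 */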
void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	while (!xa_empty(&sbi->managed_pslots)) {
		z_erofs_shrink_scan(sbi, ~0UL);
		cond_resched();
	}

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}

static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}
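
/*
 * Walk the mounted instances round-robin: each scanned instance is
 * tagged with the current run number and rotated to the list tail, so a
 * single run visits every instance at most once while keeping things
 * fair across runs.
 */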
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += z_erofs_shrink_scan(sbi, nr - freed);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}
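
/* Register the module-wide shrinker with the MM subsystem */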
int __init erofs_init_shrinker(void)
{
	erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
	if (!erofs_shrinker_info)
		return -ENOMEM;

	erofs_shrinker_info->count_objects = erofs_shrink_count;
	erofs_shrinker_info->scan_objects = erofs_shrink_scan;
	shrinker_register(erofs_shrinker_info);
	return 0;
}

void erofs_exit_shrinker(void)
{
	shrinker_free(erofs_shrinker_info);
}