  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* FS-Cache cache handling
  3. *
  4. * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells (dhowells@redhat.com)
  6. */
  7. #define FSCACHE_DEBUG_LEVEL CACHE
  8. #include <linux/export.h>
  9. #include <linux/slab.h>
  10. #include "internal.h"
  11. static LIST_HEAD(fscache_caches);
  12. DECLARE_RWSEM(fscache_addremove_sem);
  13. EXPORT_SYMBOL(fscache_addremove_sem);
  14. DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
  15. EXPORT_SYMBOL(fscache_clearance_waiters);
  16. static atomic_t fscache_cache_debug_id;
  17. /*
  18. * Allocate a cache cookie.
  19. */
  20. static struct fscache_cache *fscache_alloc_cache(const char *name)
  21. {
  22. struct fscache_cache *cache;
  23. cache = kzalloc(sizeof(*cache), GFP_KERNEL);
  24. if (cache) {
  25. if (name) {
  26. cache->name = kstrdup(name, GFP_KERNEL);
  27. if (!cache->name) {
  28. kfree(cache);
  29. return NULL;
  30. }
  31. }
  32. refcount_set(&cache->ref, 1);
  33. INIT_LIST_HEAD(&cache->cache_link);
  34. cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
  35. }
  36. return cache;
  37. }
  38. static bool fscache_get_cache_maybe(struct fscache_cache *cache,
  39. enum fscache_cache_trace where)
  40. {
  41. bool success;
  42. int ref;
  43. success = __refcount_inc_not_zero(&cache->ref, &ref);
  44. if (success)
  45. trace_fscache_cache(cache->debug_id, ref + 1, where);
  46. return success;
  47. }
/*
 * Look up a cache cookie, creating it if it doesn't yet exist.
 *
 * Matching is tried first under the read lock; if that fails, a candidate
 * record is allocated and the search repeated under the write lock before
 * the candidate is inserted (another thread may have raced us in between).
 * If @is_cache, a nameless record left by an earlier anonymous user may be
 * adopted and given @name.
 */
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
{
	struct fscache_cache *candidate, *cache, *unnamed = NULL;

	/* firstly check for the existence of the cache under read lock */
	down_read(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		/* Exact name match, or nameless record matching a NULL name. */
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
		if (!cache->name && !name &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
	}

	if (!name) {
		/* No name requested: any named cache will do as a fallback. */
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_r;
		}
	}

	up_read(&fscache_addremove_sem);

	/* the cache does not exist - create a candidate */
	candidate = fscache_alloc_cache(name);
	if (!candidate)
		return ERR_PTR(-ENOMEM);

	/* write lock, search again and add if still not present */
	down_write(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_w;
		if (!cache->name) {
			/* Remember a nameless record for possible adoption. */
			unnamed = cache;
			if (!name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	if (unnamed && is_cache &&
	    fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
		goto use_unnamed_cache;

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	/* Still absent: install the candidate (it keeps its initial ref). */
	list_add_tail(&candidate->cache_link, &fscache_caches);
	trace_fscache_cache(candidate->debug_id,
			    refcount_read(&candidate->ref),
			    fscache_cache_new_acquire);
	up_write(&fscache_addremove_sem);
	return candidate;

got_cache_r:
	up_read(&fscache_addremove_sem);
	return cache;
use_unnamed_cache:
	/* Adopt the nameless record, stealing the candidate's name string. */
	cache = unnamed;
	cache->name = candidate->name;
	candidate->name = NULL;
got_cache_w:
	up_write(&fscache_addremove_sem);
	/* The candidate lost the race (or donated its name); discard it. */
	kfree(candidate->name);
	kfree(candidate);
	return cache;
}
  118. /**
  119. * fscache_acquire_cache - Acquire a cache-level cookie.
  120. * @name: The name of the cache.
  121. *
  122. * Get a cookie to represent an actual cache. If a name is given and there is
  123. * a nameless cache record available, this will acquire that and set its name,
  124. * directing all the volumes using it to this cache.
  125. *
  126. * The cache will be switched over to the preparing state if not currently in
  127. * use, otherwise -EBUSY will be returned.
  128. */
  129. struct fscache_cache *fscache_acquire_cache(const char *name)
  130. {
  131. struct fscache_cache *cache;
  132. ASSERT(name);
  133. cache = fscache_lookup_cache(name, true);
  134. if (IS_ERR(cache))
  135. return cache;
  136. if (!fscache_set_cache_state_maybe(cache,
  137. FSCACHE_CACHE_IS_NOT_PRESENT,
  138. FSCACHE_CACHE_IS_PREPARING)) {
  139. pr_warn("Cache tag %s in use\n", name);
  140. fscache_put_cache(cache, fscache_cache_put_cache);
  141. return ERR_PTR(-EBUSY);
  142. }
  143. return cache;
  144. }
  145. EXPORT_SYMBOL(fscache_acquire_cache);
  146. /**
  147. * fscache_put_cache - Release a cache-level cookie.
  148. * @cache: The cache cookie to be released
  149. * @where: An indication of where the release happened
  150. *
  151. * Release the caller's reference on a cache-level cookie. The @where
  152. * indication should give information about the circumstances in which the call
  153. * occurs and will be logged through a tracepoint.
  154. */
  155. void fscache_put_cache(struct fscache_cache *cache,
  156. enum fscache_cache_trace where)
  157. {
  158. unsigned int debug_id;
  159. bool zero;
  160. int ref;
  161. if (IS_ERR_OR_NULL(cache))
  162. return;
  163. debug_id = cache->debug_id;
  164. zero = __refcount_dec_and_test(&cache->ref, &ref);
  165. trace_fscache_cache(debug_id, ref - 1, where);
  166. if (zero) {
  167. down_write(&fscache_addremove_sem);
  168. list_del_init(&cache->cache_link);
  169. up_write(&fscache_addremove_sem);
  170. kfree(cache->name);
  171. kfree(cache);
  172. }
  173. }
  174. /**
  175. * fscache_relinquish_cache - Reset cache state and release cookie
  176. * @cache: The cache cookie to be released
  177. *
  178. * Reset the state of a cache and release the caller's reference on a cache
  179. * cookie.
  180. */
  181. void fscache_relinquish_cache(struct fscache_cache *cache)
  182. {
  183. enum fscache_cache_trace where =
  184. (cache->state == FSCACHE_CACHE_IS_PREPARING) ?
  185. fscache_cache_put_prep_failed :
  186. fscache_cache_put_relinquish;
  187. cache->ops = NULL;
  188. cache->cache_priv = NULL;
  189. fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
  190. fscache_put_cache(cache, where);
  191. }
  192. EXPORT_SYMBOL(fscache_relinquish_cache);
/**
 * fscache_add_cache - Declare a cache as being open for business
 * @cache: The cache-level cookie representing the cache
 * @ops: Table of cache operations to use
 * @cache_priv: Private data for the cache record
 *
 * Add a cache to the system, making it available for netfs's to use.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
int fscache_add_cache(struct fscache_cache *cache,
		      const struct fscache_cache_ops *ops,
		      void *cache_priv)
{
	int n_accesses;

	_enter("{%s,%s}", ops->name, cache->name);

	/* The caller must have claimed the cache via fscache_acquire_cache(). */
	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);

	/* Get a ref on the cache cookie and keep its n_accesses counter raised
	 * by 1 to prevent wakeups from transitioning it to 0 until we're
	 * withdrawing caching services from it.
	 */
	n_accesses = atomic_inc_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_pin);

	/* Publish the ops table and flip to ACTIVE under the write lock so
	 * lookers-up never see a half-initialised active cache.
	 */
	down_write(&fscache_addremove_sem);
	cache->ops = ops;
	cache->cache_priv = cache_priv;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);
	up_write(&fscache_addremove_sem);

	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
	_leave(" = 0 [%s]", cache->name);
	return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
/**
 * fscache_begin_cache_access - Pin a cache so it can be accessed
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing it and returns true if successful.  This works as follows:
 *
 * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
 *     then we return false to indicate access was not permitted.
 *
 * (2) If the cache tests as live, then we increment the n_accesses count and
 *     then recheck the liveness, ending the access if it ceased to be live.
 *
 * (3) When we end the access, we decrement n_accesses and wake up the any
 *     waiters if it reaches 0.
 *
 * (4) Whilst the cache is caching, n_accesses is kept artificially
 *     incremented to prevent wakeups from happening.
 *
 * (5) When the cache is taken offline, the state is changed to prevent new
 *     accesses, n_accesses is decremented and we wait for n_accesses to
 *     become 0.
 */
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	/* Cheap pre-check before touching the counter. */
	if (!fscache_cache_is_live(cache))
		return false;

	n_accesses = atomic_inc_return(&cache->n_accesses);
	smp_mb__after_atomic(); /* Reread live flag after n_accesses */
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	/* Recheck: withdrawal may have raced between the pre-check and the
	 * increment; if so, undo our pin (possibly waking the withdrawer).
	 */
	if (!fscache_cache_is_live(cache)) {
		fscache_end_cache_access(cache, fscache_access_unlive);
		return false;
	}
	return true;
}
/**
 * fscache_end_cache_access - Unpin a cache at the end of an access.
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache after we've accessed it.  The @why indicator is merely
 * provided for tracing purposes.  Anyone waiting in
 * fscache_withdraw_cache() is woken when the count reaches zero.
 */
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	/* Order the access's memory effects before the decrement is seen. */
	smp_mb__before_atomic();
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (n_accesses == 0)
		wake_up_var(&cache->n_accesses);
}
  285. /**
  286. * fscache_io_error - Note a cache I/O error
  287. * @cache: The record describing the cache
  288. *
  289. * Note that an I/O error occurred in a cache and that it should no longer be
  290. * used for anything. This also reports the error into the kernel log.
  291. *
  292. * See Documentation/filesystems/caching/backend-api.rst for a complete
  293. * description.
  294. */
  295. void fscache_io_error(struct fscache_cache *cache)
  296. {
  297. if (fscache_set_cache_state_maybe(cache,
  298. FSCACHE_CACHE_IS_ACTIVE,
  299. FSCACHE_CACHE_GOT_IOERROR))
  300. pr_err("Cache '%s' stopped due to I/O error\n",
  301. cache->name);
  302. }
  303. EXPORT_SYMBOL(fscache_io_error);
/**
 * fscache_withdraw_cache - Withdraw a cache from the active service
 * @cache: The cache cookie
 *
 * Begin the process of withdrawing a cache from service.  This stops new
 * cache-level and volume-level accesses from taking place and waits for
 * currently ongoing cache-level accesses to end.
 */
void fscache_withdraw_cache(struct fscache_cache *cache)
{
	int n_accesses;

	pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
		  cache->name, atomic_read(&cache->object_count));

	/* Stop fscache_begin_cache_access() from granting new pins. */
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);

	/* Allow wakeups on dec-to-0 */
	/* Drop the artificial pin taken in fscache_add_cache(). */
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_unpin);

	/* Block until every in-flight access has called
	 * fscache_end_cache_access().
	 */
	wait_var_event(&cache->n_accesses,
		       atomic_read(&cache->n_accesses) == 0);
}
EXPORT_SYMBOL(fscache_withdraw_cache);
  326. #ifdef CONFIG_PROC_FS
/* One display letter per cache state for the proc listing ('-' not present,
 * 'P' preparing, 'A' active, 'E' I/O error, 'W' withdrawn).
 * NOTE(review): letter/state pairing inferred from the state names — confirm
 * against the enum order in the header.
 */
static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";
  328. /*
  329. * Generate a list of caches in /proc/fs/fscache/caches
  330. */
  331. static int fscache_caches_seq_show(struct seq_file *m, void *v)
  332. {
  333. struct fscache_cache *cache;
  334. if (v == &fscache_caches) {
  335. seq_puts(m,
  336. "CACHE REF VOLS OBJS ACCES S NAME\n"
  337. "======== ===== ===== ===== ===== = ===============\n"
  338. );
  339. return 0;
  340. }
  341. cache = list_entry(v, struct fscache_cache, cache_link);
  342. seq_printf(m,
  343. "%08x %5d %5d %5d %5d %c %s\n",
  344. cache->debug_id,
  345. refcount_read(&cache->ref),
  346. atomic_read(&cache->n_volumes),
  347. atomic_read(&cache->object_count),
  348. atomic_read(&cache->n_accesses),
  349. fscache_cache_states[cache->state],
  350. cache->name ?: "-");
  351. return 0;
  352. }
/* Begin a /proc walk: pin the cache list against add/remove for the
 * duration of the iteration (released in ->stop).
 */
static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(fscache_addremove_sem)
{
	down_read(&fscache_addremove_sem);
	return seq_list_start_head(&fscache_caches, *_pos);
}
/* Advance to the next cache record (NULL terminates the iteration). */
static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &fscache_caches, _pos);
}
/* End a /proc walk: drop the lock taken in ->start. */
static void fscache_caches_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_addremove_sem)
{
	up_read(&fscache_addremove_sem);
}
/* seq_file operations backing /proc/fs/fscache/caches. */
const struct seq_operations fscache_caches_seq_ops = {
	.start	= fscache_caches_seq_start,
	.next	= fscache_caches_seq_next,
	.stop	= fscache_caches_seq_stop,
	.show	= fscache_caches_seq_show,
};
  374. #endif /* CONFIG_PROC_FS */