  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* Cache data I/O routines
  3. *
  4. * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells (dhowells@redhat.com)
  6. */
  7. #define FSCACHE_DEBUG_LEVEL OPERATION
  8. #include <linux/fscache-cache.h>
  9. #include <linux/uio.h>
  10. #include <linux/bvec.h>
  11. #include <linux/slab.h>
  12. #include <linux/uio.h>
  13. #include "internal.h"
/**
 * fscache_wait_for_operation - Wait for an object to become accessible
 * @cres: The cache resources for the operation being performed
 * @want_state: The minimum state the object must be at
 *
 * See if the target cache object is at the specified minimum state of
 * accessibility yet, and if not, wait for it.
 *
 * Return: %true if the object reached the wanted state and (if required) the
 * cache backend's operation was begun; %false if the cache or cookie is not
 * live.
 */
bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
				enum fscache_want_state want_state)
{
	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
	enum fscache_cookie_state state;

again:
	/* The cache may have failed whilst we were waiting; re-check each
	 * time round.
	 */
	if (!fscache_cache_is_live(cookie->volume->cache)) {
		_leave(" [broken]");
		return false;
	}

	state = fscache_cookie_state(cookie);
	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

	switch (state) {
	case FSCACHE_COOKIE_STATE_CREATING:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		if (want_state == FSCACHE_WANT_PARAMS)
			goto ready; /* There can be no content */
		fallthrough;
	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
		/* Transient states: sleep until the cookie changes state,
		 * then re-evaluate from the top.
		 */
		wait_var_event(&cookie->state,
			       fscache_cookie_state(cookie) != state);
		goto again;

	case FSCACHE_COOKIE_STATE_ACTIVE:
		goto ready;
	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
	default:
		_leave(" [not live]");
		return false;
	}

ready:
	/* Only ask the backend to begin the operation if one hasn't already
	 * been attached to the resources record.
	 */
	if (!cres->cache_priv2)
		return cookie->volume->cache->ops->begin_operation(cres, want_state);
	return true;
}
EXPORT_SYMBOL(fscache_wait_for_operation);
/*
 * Begin an I/O operation on the cache, waiting till we reach the right state.
 *
 * Attaches the resources required to the operation resources record.  On
 * success, a cookie access (pinning the cookie for I/O) is held and must be
 * dropped by the eventual fscache_end_operation(); on failure, -ENOBUFS is
 * returned and the access has already been released.
 */
static int fscache_begin_operation(struct netfs_cache_resources *cres,
				   struct fscache_cookie *cookie,
				   enum fscache_want_state want_state,
				   enum fscache_access_trace why)
{
	enum fscache_cookie_state state;
	long timeo;
	bool once_only = false;

	cres->ops = NULL;
	cres->cache_priv = cookie;
	cres->cache_priv2 = NULL;
	cres->debug_id = cookie->debug_id;
	/* Snapshot the invalidation counter so stale ops can be detected. */
	cres->inval_counter = cookie->inval_counter;

	/* Pin the cookie against relinquishment for the duration. */
	if (!fscache_begin_cookie_access(cookie, why)) {
		cres->cache_priv = NULL;
		return -ENOBUFS;
	}

again:
	spin_lock(&cookie->lock);

	state = fscache_cookie_state(cookie);
	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

	switch (state) {
	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		goto wait_for_file_wrangling;
	case FSCACHE_COOKIE_STATE_CREATING:
		if (want_state == FSCACHE_WANT_PARAMS)
			goto ready; /* There can be no content */
		goto wait_for_file_wrangling;
	case FSCACHE_COOKIE_STATE_ACTIVE:
		goto ready;
	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
		WARN(1, "Can't use cookie in state %u\n", cookie->state);
		goto not_live;
	default:
		goto not_live;
	}

ready:
	spin_unlock(&cookie->lock);
	if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
		goto failed;
	return 0;

wait_for_file_wrangling:
	spin_unlock(&cookie->lock);
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     atomic_read(&cookie->n_accesses),
			     fscache_access_io_wait);
	/* Wait for the state to change, with a 20s timeout so that a stuck
	 * state machine gets reported (once) rather than hanging silently.
	 */
	timeo = wait_var_event_timeout(&cookie->state,
				       fscache_cookie_state(cookie) != state, 20 * HZ);
	if (timeo <= 1 && !once_only) {
		pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u",
			__func__, fscache_cookie_state(cookie), state);
		fscache_print_cookie(cookie, 'O');
		once_only = true;
	}
	goto again;

not_live:
	spin_unlock(&cookie->lock);
failed:
	cres->cache_priv = NULL;
	cres->ops = NULL;
	/* Drop the access pinned by fscache_begin_cookie_access() above. */
	fscache_end_cookie_access(cookie, fscache_access_io_not_live);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
  131. int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
  132. struct fscache_cookie *cookie)
  133. {
  134. return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
  135. fscache_access_io_read);
  136. }
  137. EXPORT_SYMBOL(__fscache_begin_read_operation);
  138. int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
  139. struct fscache_cookie *cookie)
  140. {
  141. return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
  142. fscache_access_io_write);
  143. }
  144. EXPORT_SYMBOL(__fscache_begin_write_operation);
/*
 * Record of an in-flight write to the cache, allocated per write and freed by
 * fscache_wreq_done() when the I/O completes.
 */
struct fscache_write_request {
	struct netfs_cache_resources cache_resources;	/* Cache op resources; ended on completion */
	struct address_space *mapping;		/* The mapping the data is written from */
	loff_t start;				/* Byte offset of the start of the write */
	size_t len;				/* Number of bytes being written */
	bool set_bits;				/* Passed through to fscache_clear_page_bits() */
	bool using_pgpriv2;			/* Whether PG_private_2 marks need clearing */
	netfs_io_terminated_t term_func;	/* Completion callback (may be NULL) */
	void *term_func_priv;			/* Private data for term_func */
};
  155. void __fscache_clear_page_bits(struct address_space *mapping,
  156. loff_t start, size_t len)
  157. {
  158. pgoff_t first = start / PAGE_SIZE;
  159. pgoff_t last = (start + len - 1) / PAGE_SIZE;
  160. struct page *page;
  161. if (len) {
  162. XA_STATE(xas, &mapping->i_pages, first);
  163. rcu_read_lock();
  164. xas_for_each(&xas, page, last) {
  165. folio_end_private_2(page_folio(page));
  166. }
  167. rcu_read_unlock();
  168. }
  169. }
  170. EXPORT_SYMBOL(__fscache_clear_page_bits);
/*
 * Deal with the completion of writing the data to the cache.
 *
 * Clears any PG_private_2 marks, notifies the caller, then releases the
 * cache operation and the request record — this ordering matters: the
 * termination callback must run before the resources it may inspect are
 * torn down.
 */
static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
			      bool was_async)
{
	struct fscache_write_request *wreq = priv;

	/* If the netfs used PG_private_2 to mark the span, clear it now that
	 * the write (or the attempt) has finished.
	 */
	if (wreq->using_pgpriv2)
		fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
					wreq->set_bits);

	if (wreq->term_func)
		wreq->term_func(wreq->term_func_priv, transferred_or_error,
				was_async);
	fscache_end_operation(&wreq->cache_resources);
	kfree(wreq);
}
/*
 * Write data from a mapping to the cache, beginning a write operation on the
 * cookie and dispatching asynchronous I/O over the byte range [start,
 * start + len).  Completion — success or failure — is reported through
 * @term_func (if given); on any abandonment path the PG_private_2 marks are
 * still cleared (when @using_pgpriv2) so pages are not left stuck.
 */
void __fscache_write_to_cache(struct fscache_cookie *cookie,
			      struct address_space *mapping,
			      loff_t start, size_t len, loff_t i_size,
			      netfs_io_terminated_t term_func,
			      void *term_func_priv,
			      bool using_pgpriv2, bool cond)
{
	struct fscache_write_request *wreq;
	struct netfs_cache_resources *cres;
	struct iov_iter iter;
	int ret = -ENOBUFS;

	/* Nothing to write; just run the failure/cleanup path. */
	if (len == 0)
		goto abandon;

	_enter("%llx,%zx", start, len);

	wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
	if (!wreq)
		goto abandon;
	wreq->mapping = mapping;
	wreq->start = start;
	wreq->len = len;
	wreq->using_pgpriv2 = using_pgpriv2;
	wreq->set_bits = cond;
	wreq->term_func = term_func;
	wreq->term_func_priv = term_func_priv;

	cres = &wreq->cache_resources;
	if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
				    fscache_access_io_write) < 0)
		goto abandon_free;

	/* The backend may shift/shrink the range it will actually store. */
	ret = cres->ops->prepare_write(cres, &start, &len, len, i_size, false);
	if (ret < 0)
		goto abandon_end;

	/* TODO: Consider clearing page bits now for space the write isn't
	 * covering. This is more complicated than it appears when THPs are
	 * taken into account.
	 */
	iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
	/* Ownership of wreq passes to the completion handler. */
	fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
	return;

abandon_end:
	/* The operation was begun; fscache_wreq_done() ends it, clears the
	 * page bits, notifies the caller and frees wreq.
	 */
	return fscache_wreq_done(wreq, ret, false);
abandon_free:
	kfree(wreq);
abandon:
	if (using_pgpriv2)
		fscache_clear_page_bits(mapping, start, len, cond);
	if (term_func)
		term_func(term_func_priv, ret, false);
}
EXPORT_SYMBOL(__fscache_write_to_cache);
  236. /*
  237. * Change the size of a backing object.
  238. */
  239. void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
  240. {
  241. struct netfs_cache_resources cres;
  242. trace_fscache_resize(cookie, new_size);
  243. if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
  244. fscache_access_io_resize) == 0) {
  245. fscache_stat(&fscache_n_resizes);
  246. set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
  247. /* We cannot defer a resize as we need to do it inside the
  248. * netfs's inode lock so that we're serialised with respect to
  249. * writes.
  250. */
  251. cookie->volume->cache->ops->resize_cookie(&cres, new_size);
  252. fscache_end_operation(&cres);
  253. } else {
  254. fscache_stat(&fscache_n_resizes_null);
  255. }
  256. }
  257. EXPORT_SYMBOL(__fscache_resize_cookie);