/* internal.h */
  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. /* Internal definitions for network filesystem support
  3. *
  4. * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells (dhowells@redhat.com)
  6. */
  7. #include <linux/slab.h>
  8. #include <linux/seq_file.h>
  9. #include <linux/folio_queue.h>
  10. #include <linux/netfs.h>
  11. #include <linux/fscache.h>
  12. #include <linux/fscache-cache.h>
  13. #include <trace/events/netfs.h>
  14. #include <trace/events/fscache.h>
  15. #ifdef pr_fmt
  16. #undef pr_fmt
  17. #endif
  18. #define pr_fmt(fmt) "netfs: " fmt
  19. /*
  20. * buffered_read.c
  21. */
  22. int netfs_prefetch_for_write(struct file *file, struct folio *folio,
  23. size_t offset, size_t len);
  24. /*
  25. * main.c
  26. */
  27. extern unsigned int netfs_debug;
  28. extern struct list_head netfs_io_requests;
  29. extern spinlock_t netfs_proc_lock;
  30. extern mempool_t netfs_request_pool;
  31. extern mempool_t netfs_subrequest_pool;
  32. #ifdef CONFIG_PROC_FS
  33. static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
  34. {
  35. spin_lock(&netfs_proc_lock);
  36. list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
  37. spin_unlock(&netfs_proc_lock);
  38. }
  39. static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
  40. {
  41. if (!list_empty(&rreq->proc_link)) {
  42. spin_lock(&netfs_proc_lock);
  43. list_del_rcu(&rreq->proc_link);
  44. spin_unlock(&netfs_proc_lock);
  45. }
  46. }
  47. #else
  48. static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
  49. static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
  50. #endif
  51. /*
  52. * misc.c
  53. */
  54. struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq);
  55. int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
  56. bool needs_put);
  57. struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
  58. void netfs_clear_buffer(struct netfs_io_request *rreq);
  59. void netfs_reset_iter(struct netfs_io_subrequest *subreq);
  60. /*
  61. * objects.c
  62. */
  63. struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
  64. struct file *file,
  65. loff_t start, size_t len,
  66. enum netfs_io_origin origin);
  67. void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
  68. void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
  69. void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
  70. enum netfs_rreq_ref_trace what);
  71. struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
  72. static inline void netfs_see_request(struct netfs_io_request *rreq,
  73. enum netfs_rreq_ref_trace what)
  74. {
  75. trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
  76. }
  77. /*
  78. * read_collect.c
  79. */
  80. void netfs_read_termination_worker(struct work_struct *work);
  81. void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async);
  82. /*
  83. * read_pgpriv2.c
  84. */
  85. void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
  86. struct netfs_io_request *rreq,
  87. struct folio_queue *folioq,
  88. int slot);
  89. void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq);
  90. bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);
  91. /*
  92. * read_retry.c
  93. */
  94. void netfs_retry_reads(struct netfs_io_request *rreq);
  95. void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);
  96. /*
  97. * stats.c
  98. */
  99. #ifdef CONFIG_NETFS_STATS
  100. extern atomic_t netfs_n_rh_dio_read;
  101. extern atomic_t netfs_n_rh_readahead;
  102. extern atomic_t netfs_n_rh_read_folio;
  103. extern atomic_t netfs_n_rh_rreq;
  104. extern atomic_t netfs_n_rh_sreq;
  105. extern atomic_t netfs_n_rh_download;
  106. extern atomic_t netfs_n_rh_download_done;
  107. extern atomic_t netfs_n_rh_download_failed;
  108. extern atomic_t netfs_n_rh_download_instead;
  109. extern atomic_t netfs_n_rh_read;
  110. extern atomic_t netfs_n_rh_read_done;
  111. extern atomic_t netfs_n_rh_read_failed;
  112. extern atomic_t netfs_n_rh_zero;
  113. extern atomic_t netfs_n_rh_short_read;
  114. extern atomic_t netfs_n_rh_write;
  115. extern atomic_t netfs_n_rh_write_begin;
  116. extern atomic_t netfs_n_rh_write_done;
  117. extern atomic_t netfs_n_rh_write_failed;
  118. extern atomic_t netfs_n_rh_write_zskip;
  119. extern atomic_t netfs_n_wh_buffered_write;
  120. extern atomic_t netfs_n_wh_writethrough;
  121. extern atomic_t netfs_n_wh_dio_write;
  122. extern atomic_t netfs_n_wh_writepages;
  123. extern atomic_t netfs_n_wh_copy_to_cache;
  124. extern atomic_t netfs_n_wh_wstream_conflict;
  125. extern atomic_t netfs_n_wh_upload;
  126. extern atomic_t netfs_n_wh_upload_done;
  127. extern atomic_t netfs_n_wh_upload_failed;
  128. extern atomic_t netfs_n_wh_write;
  129. extern atomic_t netfs_n_wh_write_done;
  130. extern atomic_t netfs_n_wh_write_failed;
  131. extern atomic_t netfs_n_wb_lock_skip;
  132. extern atomic_t netfs_n_wb_lock_wait;
  133. extern atomic_t netfs_n_folioq;
  134. int netfs_stats_show(struct seq_file *m, void *v);
  135. static inline void netfs_stat(atomic_t *stat)
  136. {
  137. atomic_inc(stat);
  138. }
  139. static inline void netfs_stat_d(atomic_t *stat)
  140. {
  141. atomic_dec(stat);
  142. }
  143. #else
  144. #define netfs_stat(x) do {} while(0)
  145. #define netfs_stat_d(x) do {} while(0)
  146. #endif
  147. /*
  148. * write_collect.c
  149. */
  150. int netfs_folio_written_back(struct folio *folio);
  151. void netfs_write_collection_worker(struct work_struct *work);
  152. void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);
  153. /*
  154. * write_issue.c
  155. */
  156. struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
  157. struct file *file,
  158. loff_t start,
  159. enum netfs_io_origin origin);
  160. void netfs_reissue_write(struct netfs_io_stream *stream,
  161. struct netfs_io_subrequest *subreq,
  162. struct iov_iter *source);
  163. void netfs_issue_write(struct netfs_io_request *wreq,
  164. struct netfs_io_stream *stream);
  165. int netfs_advance_write(struct netfs_io_request *wreq,
  166. struct netfs_io_stream *stream,
  167. loff_t start, size_t len, bool to_eof);
  168. struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
  169. int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
  170. struct folio *folio, size_t copied, bool to_page_end,
  171. struct folio **writethrough_cache);
  172. int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
  173. struct folio *writethrough_cache);
  174. int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
  175. /*
  176. * Miscellaneous functions.
  177. */
  178. static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
  179. {
  180. #if IS_ENABLED(CONFIG_FSCACHE)
  181. struct fscache_cookie *cookie = ctx->cache;
  182. return fscache_cookie_valid(cookie) && cookie->cache_priv &&
  183. fscache_cookie_enabled(cookie);
  184. #else
  185. return false;
  186. #endif
  187. }
  188. /*
  189. * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
  190. */
  191. static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
  192. {
  193. if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
  194. refcount_inc(&netfs_group->ref);
  195. return netfs_group;
  196. }
  197. /*
  198. * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
  199. */
  200. static inline void netfs_put_group(struct netfs_group *netfs_group)
  201. {
  202. if (netfs_group &&
  203. netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
  204. refcount_dec_and_test(&netfs_group->ref))
  205. netfs_group->free(netfs_group);
  206. }
  207. /*
  208. * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
  209. */
  210. static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
  211. {
  212. if (netfs_group &&
  213. netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
  214. refcount_sub_and_test(nr, &netfs_group->ref))
  215. netfs_group->free(netfs_group);
  216. }
  217. /*
  218. * fscache-cache.c
  219. */
  220. #ifdef CONFIG_PROC_FS
  221. extern const struct seq_operations fscache_caches_seq_ops;
  222. #endif
  223. bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
  224. void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
  225. struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
  226. void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
  227. static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
  228. {
  229. return smp_load_acquire(&cache->state);
  230. }
  231. static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
  232. {
  233. return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
  234. }
  235. static inline void fscache_set_cache_state(struct fscache_cache *cache,
  236. enum fscache_cache_state new_state)
  237. {
  238. smp_store_release(&cache->state, new_state);
  239. }
  240. static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
  241. enum fscache_cache_state old_state,
  242. enum fscache_cache_state new_state)
  243. {
  244. return try_cmpxchg_release(&cache->state, &old_state, new_state);
  245. }
  246. /*
  247. * fscache-cookie.c
  248. */
  249. extern struct kmem_cache *fscache_cookie_jar;
  250. #ifdef CONFIG_PROC_FS
  251. extern const struct seq_operations fscache_cookies_seq_ops;
  252. #endif
  253. extern struct timer_list fscache_cookie_lru_timer;
  254. extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
  255. extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
  256. enum fscache_access_trace why);
  257. static inline void fscache_see_cookie(struct fscache_cookie *cookie,
  258. enum fscache_cookie_trace where)
  259. {
  260. trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
  261. where);
  262. }
  263. /*
  264. * fscache-main.c
  265. */
  266. extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
  267. #ifdef CONFIG_FSCACHE
  268. int __init fscache_init(void);
  269. void __exit fscache_exit(void);
  270. #else
  271. static inline int fscache_init(void) { return 0; }
  272. static inline void fscache_exit(void) {}
  273. #endif
  274. /*
  275. * fscache-proc.c
  276. */
  277. #ifdef CONFIG_PROC_FS
  278. extern int __init fscache_proc_init(void);
  279. extern void fscache_proc_cleanup(void);
  280. #else
  281. #define fscache_proc_init() (0)
  282. #define fscache_proc_cleanup() do {} while (0)
  283. #endif
  284. /*
  285. * fscache-stats.c
  286. */
  287. #ifdef CONFIG_FSCACHE_STATS
  288. extern atomic_t fscache_n_volumes;
  289. extern atomic_t fscache_n_volumes_collision;
  290. extern atomic_t fscache_n_volumes_nomem;
  291. extern atomic_t fscache_n_cookies;
  292. extern atomic_t fscache_n_cookies_lru;
  293. extern atomic_t fscache_n_cookies_lru_expired;
  294. extern atomic_t fscache_n_cookies_lru_removed;
  295. extern atomic_t fscache_n_cookies_lru_dropped;
  296. extern atomic_t fscache_n_acquires;
  297. extern atomic_t fscache_n_acquires_ok;
  298. extern atomic_t fscache_n_acquires_oom;
  299. extern atomic_t fscache_n_invalidates;
  300. extern atomic_t fscache_n_relinquishes;
  301. extern atomic_t fscache_n_relinquishes_retire;
  302. extern atomic_t fscache_n_relinquishes_dropped;
  303. extern atomic_t fscache_n_resizes;
  304. extern atomic_t fscache_n_resizes_null;
  305. static inline void fscache_stat(atomic_t *stat)
  306. {
  307. atomic_inc(stat);
  308. }
  309. static inline void fscache_stat_d(atomic_t *stat)
  310. {
  311. atomic_dec(stat);
  312. }
  313. #define __fscache_stat(stat) (stat)
  314. int fscache_stats_show(struct seq_file *m);
  315. #else
  316. #define __fscache_stat(stat) (NULL)
  317. #define fscache_stat(stat) do {} while (0)
  318. #define fscache_stat_d(stat) do {} while (0)
  319. static inline int fscache_stats_show(struct seq_file *m) { return 0; }
  320. #endif
  321. /*
  322. * fscache-volume.c
  323. */
  324. #ifdef CONFIG_PROC_FS
  325. extern const struct seq_operations fscache_volumes_seq_ops;
  326. #endif
  327. struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
  328. enum fscache_volume_trace where);
  329. bool fscache_begin_volume_access(struct fscache_volume *volume,
  330. struct fscache_cookie *cookie,
  331. enum fscache_access_trace why);
  332. void fscache_create_volume(struct fscache_volume *volume, bool wait);
  333. /*****************************************************************************/
  334. /*
  335. * debug tracing
  336. */
  337. #define dbgprintk(FMT, ...) \
  338. printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
  339. #define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
  340. #define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
  341. #define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
  342. #ifdef __KDEBUG
  343. #define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
  344. #define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
  345. #define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
  346. #elif defined(CONFIG_NETFS_DEBUG)
  347. #define _enter(FMT, ...) \
  348. do { \
  349. if (netfs_debug) \
  350. kenter(FMT, ##__VA_ARGS__); \
  351. } while (0)
  352. #define _leave(FMT, ...) \
  353. do { \
  354. if (netfs_debug) \
  355. kleave(FMT, ##__VA_ARGS__); \
  356. } while (0)
  357. #define _debug(FMT, ...) \
  358. do { \
  359. if (netfs_debug) \
  360. kdebug(FMT, ##__VA_ARGS__); \
  361. } while (0)
  362. #else
  363. #define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
  364. #define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
  365. #define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
  366. #endif
  367. /*
  368. * assertions
  369. */
  370. #if 1 /* defined(__KDEBUGALL) */
  371. #define ASSERT(X) \
  372. do { \
  373. if (unlikely(!(X))) { \
  374. pr_err("\n"); \
  375. pr_err("Assertion failed\n"); \
  376. BUG(); \
  377. } \
  378. } while (0)
  379. #define ASSERTCMP(X, OP, Y) \
  380. do { \
  381. if (unlikely(!((X) OP (Y)))) { \
  382. pr_err("\n"); \
  383. pr_err("Assertion failed\n"); \
  384. pr_err("%lx " #OP " %lx is false\n", \
  385. (unsigned long)(X), (unsigned long)(Y)); \
  386. BUG(); \
  387. } \
  388. } while (0)
  389. #define ASSERTIF(C, X) \
  390. do { \
  391. if (unlikely((C) && !(X))) { \
  392. pr_err("\n"); \
  393. pr_err("Assertion failed\n"); \
  394. BUG(); \
  395. } \
  396. } while (0)
  397. #define ASSERTIFCMP(C, X, OP, Y) \
  398. do { \
  399. if (unlikely((C) && !((X) OP (Y)))) { \
  400. pr_err("\n"); \
  401. pr_err("Assertion failed\n"); \
  402. pr_err("%lx " #OP " %lx is false\n", \
  403. (unsigned long)(X), (unsigned long)(Y)); \
  404. BUG(); \
  405. } \
  406. } while (0)
  407. #else
  408. #define ASSERT(X) do {} while (0)
  409. #define ASSERTCMP(X, OP, Y) do {} while (0)
  410. #define ASSERTIF(C, X) do {} while (0)
  411. #define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
  412. #endif /* assert or not */