read_pgpriv2.c

// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
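
/*
 * Overview: folios that need copying to the cache are flagged with
 * PG_private_2 and the third folio_queue mark by
 * netfs_pgpriv2_mark_copy_to_cache() as the read progresses.  Once the read
 * is complete, netfs_pgpriv2_write_to_the_cache() builds a write request
 * that copies each marked folio to the cache, and
 * netfs_pgpriv2_unlock_copied_folios() drops the marks as the copies
 * complete.  On unrecoverable error, netfs_pgpriv2_cancel() clears
 * everything instead.
 */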

/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2.  The
 * third mark in the folio queue is used to indicate that this folio needs
 * writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}
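
		/* marks3 is a bitmap indexed by slot, so __ffs() picks the
		 * lowest marked slot: e.g. with slots 2 and 5 marked, marks3
		 * is 0x24 and __ffs() returns 2.  One mark is cleared per
		 * pass, so the walk terminates.
		 */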
		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
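	/* Stream 1 of a write request is, by netfs convention, the
	 * write-to-cache stream (stream 0 being the upload to the server).
	 */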
	struct netfs_io_stream *cache = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;
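
	/* Clamp the length to be written to EOF: e.g. a 16KiB folio at
	 * fpos 0 with an i_size of 9000 gives flen = 9000 and to_eof = true,
	 * so only the valid part of the folio goes to the cache.
	 */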
	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (e.g. SMB credits).
	 */
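	/* Illustratively, a 64KiB folio with a 16KiB wsize would come out as
	 * four 16KiB subrequests, submit_off advancing by however much
	 * netfs_advance_write() accepts on each pass.
	 */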
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);
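
	/* If the folio crosses EOF, the write can't be extended any further,
	 * so issue what has been queued up now.
	 */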
	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked with
 * the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
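
	/* If the scan found no marked folio, there is nothing to copy. */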
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);
	if (!wreq->io_streams[1].avail) {
		netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
		goto couldnt_start;
	}
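
	/* Copy each marked folio in turn, clearing its mark once it has been
	 * added to the write request and moving to the next queue segment
	 * when the current one runs out of marks.
	 */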
	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		while (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				goto end_of_queue;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

end_of_queue:
	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
	return;

couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}
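
	/* Work forward from the buffer head, dropping PG_private_2 from each
	 * folio that has been entirely written back, i.e. lies wholly below
	 * collected_to.
	 */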
	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}