direct_write.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Unbuffered and direct write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/export.h>
#include <linux/uio.h>
#include "internal.h"

static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
{
        struct inode *inode = wreq->inode;
        unsigned long long end = wreq->start + wreq->transferred;

        if (wreq->error || end <= i_size_read(inode))
                return;

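        /* Recheck under i_lock: another writer may have extended i_size
         * since the lockless check above. */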
        spin_lock(&inode->i_lock);
        if (end > i_size_read(inode)) {
                if (wreq->netfs_ops->update_i_size)
                        wreq->netfs_ops->update_i_size(inode, end);
                else
                        i_size_write(inode, end);
        }
        spin_unlock(&inode->i_lock);
}

/*
 * Perform an unbuffered write where we may have to do an RMW operation on an
 * encrypted file.  This can also be used for direct I/O writes.
 */
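/* The caller is expected to exclude conflicting I/O on the file, as
 * netfs_unbuffered_write_iter() below does via netfs_start_io_direct().
 */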
ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
                                           struct netfs_group *netfs_group)
{
        struct netfs_io_request *wreq;
        unsigned long long start = iocb->ki_pos;
        unsigned long long end = start + iov_iter_count(iter);
        ssize_t ret, n;
        size_t len = iov_iter_count(iter);
        bool async = !is_sync_kiocb(iocb);

        _enter("");

        /* We're going to need a bounce buffer if what we transmit is going to
         * be different in some way to the source buffer, e.g. because it gets
         * encrypted/compressed or because it needs expanding to a block size.
         */
        // TODO

        _debug("uw %llx-%llx", start, end);

        wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
                                      iocb->ki_flags & IOCB_DIRECT ?
                                      NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
        if (IS_ERR(wreq))
                return PTR_ERR(wreq);

        wreq->io_streams[0].avail = true;
        trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
                                 netfs_write_trace_dio_write :
                                 netfs_write_trace_unbuffered_write));

        {
                /* If this is an async op and we're not using a bounce buffer,
                 * we have to save the source buffer as the iterator is only
                 * good until we return.  In such a case, extract an iterator
                 * to represent as much of the output buffer as we can
                 * manage.  Note that the extraction might not be able to
                 * allocate a sufficiently large bvec array and may shorten the
                 * request.
                 */
                if (user_backed_iter(iter)) {
                        n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
                        if (n < 0) {
                                ret = n;
                                goto out;
                        }
                        wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
                        wreq->direct_bv_count = n;
                        wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
                } else {
                        /* If this is a kernel-generated async DIO request,
                         * assume that any resources the iterator points to
                         * (eg. a bio_vec array) will persist till the end of
                         * the op.
                         */
                        wreq->iter = *iter;
                }

                wreq->io_iter = wreq->iter;
        }

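        /* Have the I/O machinery draw the data from wreq->io_iter rather
         * than from the pagecache. */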
        __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);

        /* Copy the data into the bounce buffer and encrypt it. */
        // TODO

        /* Dispatch the write. */
        __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
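        /* For async I/O, stash the iocb so that request completion can
         * report the result through ->ki_complete(). */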
        if (async)
                wreq->iocb = iocb;
        wreq->len = iov_iter_count(&wreq->io_iter);
        wreq->cleanup = netfs_cleanup_dio_write;
        ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
        if (ret < 0) {
                _debug("begin = %zd", ret);
                goto out;
        }

        if (!async) {
                trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
                wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
                            TASK_UNINTERRUPTIBLE);
                smp_rmb(); /* Read error/transferred after RIP flag */
                ret = wreq->error;
                if (ret == 0) {
                        ret = wreq->transferred;
                        iocb->ki_pos += ret;
                }
        } else {
                ret = -EIOCBQUEUED;
        }

out:
        netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
        return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);

/**
 * netfs_unbuffered_write_iter - Unbuffered write to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Do an unbuffered write to a file, writing the data directly to the server
 * and not lodging the data in the pagecache.
 *
 * Return:
 * * Negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct netfs_inode *ictx = netfs_inode(inode);
        ssize_t ret;
        loff_t pos = iocb->ki_pos;
        unsigned long long end = pos + iov_iter_count(from) - 1;

        _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));

        if (!iov_iter_count(from))
                return 0;

        trace_netfs_write_iter(iocb, from);
        netfs_stat(&netfs_n_wh_dio_write);

        ret = netfs_start_io_direct(inode);
        if (ret < 0)
                return ret;
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(file);
        if (ret < 0)
                goto out;
        ret = file_update_time(file);
        if (ret < 0)
                goto out;
        if (iocb->ki_flags & IOCB_NOWAIT) {
                /* We could block if there are any pages in the range. */
                ret = -EAGAIN;
                if (filemap_range_has_page(mapping, pos, end))
                        if (filemap_invalidate_inode(inode, true, pos, end))
                                goto out;
        } else {
                ret = filemap_write_and_wait_range(mapping, pos, end);
                if (ret < 0)
                        goto out;
        }

        /*
         * After a write we want buffered reads to be sure to go to disk to
         * get the new data.  We invalidate clean cached pages from the region
         * we're about to write.  We do this *before* the write so that we can
         * return without clobbering -EIOCBQUEUED from ->direct_IO().
         */
        ret = filemap_invalidate_inode(inode, true, pos, end);
        if (ret < 0)
                goto out;
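        /* Push out zero_point (the offset beyond which the server is assumed
         * to hold no data) if this write extends past it, so that reads over
         * the newly written region actually go to the server. */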
        end = iocb->ki_pos + iov_iter_count(from);
        if (end > ictx->zero_point)
                ictx->zero_point = end;

        fscache_invalidate(netfs_i_cookie(ictx), NULL, i_size_read(inode),
                           FSCACHE_INVAL_DIO_WRITE);

        ret = netfs_unbuffered_write_iter_locked(iocb, from, NULL);
out:
        netfs_end_io_direct(inode);
        return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter);
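
/*
 * Usage sketch (illustration, not part of the original file): a netfs-based
 * filesystem would typically route O_DIRECT writes here from its
 * ->write_iter handler and take the buffered path otherwise.  The myfs_*
 * name is hypothetical; netfs_file_write_iter() is the netfs buffered-write
 * entry point.
 */
#if 0
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        if (iocb->ki_flags & IOCB_DIRECT)
                return netfs_unbuffered_write_iter(iocb, from);

        return netfs_file_write_iter(iocb, from);
}
#endif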