/* io_uring/sync.c — fsync, sync_file_range and fallocate request handling */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/kernel.h>
  3. #include <linux/errno.h>
  4. #include <linux/fs.h>
  5. #include <linux/file.h>
  6. #include <linux/mm.h>
  7. #include <linux/slab.h>
  8. #include <linux/namei.h>
  9. #include <linux/io_uring.h>
  10. #include <linux/fsnotify.h>
  11. #include <uapi/linux/io_uring.h>
  12. #include "io_uring.h"
  13. #include "sync.h"
/*
 * Per-request state for the sync-family opcodes (fsync, sync_file_range,
 * fallocate), stored in the io_kiocb command area via io_kiocb_to_cmd().
 */
struct io_sync {
	struct file *file;	/* target file of the request */
	loff_t len;		/* byte count; for fallocate this is read from sqe->addr */
	loff_t off;		/* starting file offset, from sqe->off */
	int flags;		/* sqe->fsync_flags or sqe->sync_range_flags */
	int mode;		/* fallocate mode, read from sqe->len (fallocate only) */
};
  21. int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  22. {
  23. struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
  24. if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
  25. return -EINVAL;
  26. sync->off = READ_ONCE(sqe->off);
  27. sync->len = READ_ONCE(sqe->len);
  28. sync->flags = READ_ONCE(sqe->sync_range_flags);
  29. req->flags |= REQ_F_FORCE_ASYNC;
  30. return 0;
  31. }
  32. int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
  33. {
  34. struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
  35. int ret;
  36. /* sync_file_range always requires a blocking context */
  37. WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
  38. ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
  39. io_req_set_res(req, ret, 0);
  40. return IOU_OK;
  41. }
  42. int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  43. {
  44. struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
  45. if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
  46. return -EINVAL;
  47. sync->flags = READ_ONCE(sqe->fsync_flags);
  48. if (unlikely(sync->flags & ~IORING_FSYNC_DATASYNC))
  49. return -EINVAL;
  50. sync->off = READ_ONCE(sqe->off);
  51. sync->len = READ_ONCE(sqe->len);
  52. req->flags |= REQ_F_FORCE_ASYNC;
  53. return 0;
  54. }
  55. int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
  56. {
  57. struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
  58. loff_t end = sync->off + sync->len;
  59. int ret;
  60. /* fsync always requires a blocking context */
  61. WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
  62. ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
  63. sync->flags & IORING_FSYNC_DATASYNC);
  64. io_req_set_res(req, ret, 0);
  65. return IOU_OK;
  66. }
  67. int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  68. {
  69. struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
  70. if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
  71. return -EINVAL;
  72. sync->off = READ_ONCE(sqe->off);
  73. sync->len = READ_ONCE(sqe->addr);
  74. sync->mode = READ_ONCE(sqe->len);
  75. req->flags |= REQ_F_FORCE_ASYNC;
  76. return 0;
  77. }
  78. int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
  79. {
  80. struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
  81. int ret;
  82. /* fallocate always requiring blocking context */
  83. WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
  84. ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
  85. if (ret >= 0)
  86. fsnotify_modify(req->file);
  87. io_req_set_res(req, ret, 0);
  88. return IOU_OK;
  89. }