/* io_uring/openclose.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/kernel.h>
  3. #include <linux/errno.h>
  4. #include <linux/fs.h>
  5. #include <linux/file.h>
  6. #include <linux/fdtable.h>
  7. #include <linux/fsnotify.h>
  8. #include <linux/namei.h>
  9. #include <linux/io_uring.h>
  10. #include <uapi/linux/io_uring.h>
  11. #include "../fs/internal.h"
  12. #include "io_uring.h"
  13. #include "rsrc.h"
  14. #include "openclose.h"
/* Per-request state for IORING_OP_OPENAT / IORING_OP_OPENAT2. */
struct io_open {
	struct file			*file;
	int				dfd;		/* directory fd the path is resolved relative to */
	u32				file_slot;	/* fixed file slot; 0 means install a normal fd */
	struct filename			*filename;	/* path from getname(), released in cleanup */
	struct open_how			how;		/* open flags/mode/resolve, built at prep time */
	unsigned long			nofile;		/* RLIMIT_NOFILE snapshot taken at prep time */
};
/* Per-request state for IORING_OP_CLOSE. */
struct io_close {
	struct file			*file;
	int				fd;		/* regular fd to close; must be 0 when a slot is used */
	u32				file_slot;	/* fixed slot index + 1; 0 means close a regular fd */
};
/* Per-request state for IORING_OP_FIXED_FD_INSTALL. */
struct io_fixed_install {
	struct file			*file;
	unsigned int			o_flags;	/* O_CLOEXEC or 0, handed to receive_fd() */
};
  32. static bool io_openat_force_async(struct io_open *open)
  33. {
  34. /*
  35. * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
  36. * it'll always -EAGAIN. Note that we test for __O_TMPFILE because
  37. * O_TMPFILE includes O_DIRECTORY, which isn't a flag we need to force
  38. * async for.
  39. */
  40. return open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE);
  41. }
/*
 * Common prep for OPENAT and OPENAT2: validate the SQE, pull the dfd,
 * pathname and fixed-slot index, and snapshot RLIMIT_NOFILE.  The caller
 * must have filled in open->how before calling.  Returns 0 or -errno.
 */
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	/* buf_index is not used by the open opcodes; reject nonzero */
	if (unlikely(sqe->buf_index))
		return -EINVAL;
	/* an open request cannot itself operate on a fixed file */
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		/* NULL it so cleanup doesn't try to putname() an ERR_PTR */
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	/*
	 * O_CLOEXEC makes no sense for a fixed-table slot (slots aren't
	 * inherited across exec), so reject the combination.
	 */
	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	/* filename is now held: make sure cleanup releases it on failure */
	req->flags |= REQ_F_NEED_CLEANUP;
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}
  71. int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  72. {
  73. struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
  74. u64 mode = READ_ONCE(sqe->len);
  75. u64 flags = READ_ONCE(sqe->open_flags);
  76. open->how = build_open_how(flags, mode);
  77. return __io_openat_prep(req, sqe);
  78. }
  79. int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  80. {
  81. struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
  82. struct open_how __user *how;
  83. size_t len;
  84. int ret;
  85. how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
  86. len = READ_ONCE(sqe->len);
  87. if (len < OPEN_HOW_SIZE_VER0)
  88. return -EINVAL;
  89. ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
  90. if (ret)
  91. return ret;
  92. return __io_openat_prep(req, sqe);
  93. }
/*
 * Issue an openat2: resolve the path and install the resulting file
 * either as a normal fd (fd_install) or into a fixed file table slot.
 * Completes the request with the fd/slot result or a negative errno;
 * may return -EAGAIN to request an async retry on a nonblocking issue.
 */
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		/* flagged force-async opens should never get here nonblocking */
		WARN_ON_ONCE(io_openat_force_async(open));
		/* restrict to a cached, non-blocking lookup for this attempt */
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	/* reserve an fd now unless the result goes into a fixed slot */
	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	/* drop the O_NONBLOCK we forced on above if the app didn't ask for it */
	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
						open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
/*
 * IORING_OP_OPENAT is just OPENAT2 with the open_how pre-built from the
 * classic flags/mode pair at prep time, so share the issue path.
 */
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}
  152. void io_open_cleanup(struct io_kiocb *req)
  153. {
  154. struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
  155. if (open->filename)
  156. putname(open->filename);
  157. }
  158. int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
  159. unsigned int offset)
  160. {
  161. int ret;
  162. io_ring_submit_lock(ctx, issue_flags);
  163. ret = io_fixed_fd_remove(ctx, offset);
  164. io_ring_submit_unlock(ctx, issue_flags);
  165. return ret;
  166. }
  167. static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
  168. {
  169. struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
  170. return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
  171. }
  172. int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  173. {
  174. struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
  175. if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
  176. return -EINVAL;
  177. if (req->flags & REQ_F_FIXED_FILE)
  178. return -EBADF;
  179. close->fd = READ_ONCE(sqe->fd);
  180. close->file_slot = READ_ONCE(sqe->file_index);
  181. if (close->file_slot && close->fd)
  182. return -EINVAL;
  183. return 0;
  184. }
/*
 * Issue a close: either remove a fixed file slot, or close a regular fd
 * from the task's file table.  Always completes the request (IOU_OK),
 * except when punting a ->flush-capable file to async via -EAGAIN.
 */
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct file *file;
	int ret = -EBADF;

	/* fixed-slot close takes a completely separate path */
	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	file = files_lookup_fd_locked(files, close->fd);
	/* don't allow io_uring to close its own ring fd */
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	/* detach the fd from the table while still holding file_lock */
	file = file_close_fd_locked(files, close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
  218. int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  219. {
  220. struct io_fixed_install *ifi;
  221. unsigned int flags;
  222. if (sqe->off || sqe->addr || sqe->len || sqe->buf_index ||
  223. sqe->splice_fd_in || sqe->addr3)
  224. return -EINVAL;
  225. /* must be a fixed file */
  226. if (!(req->flags & REQ_F_FIXED_FILE))
  227. return -EBADF;
  228. flags = READ_ONCE(sqe->install_fd_flags);
  229. if (flags & ~IORING_FIXED_FD_NO_CLOEXEC)
  230. return -EINVAL;
  231. /* ensure the task's creds are used when installing/receiving fds */
  232. if (req->flags & REQ_F_CREDS)
  233. return -EPERM;
  234. /* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
  235. ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
  236. ifi->o_flags = O_CLOEXEC;
  237. if (flags & IORING_FIXED_FD_NO_CLOEXEC)
  238. ifi->o_flags = 0;
  239. return 0;
  240. }
  241. int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags)
  242. {
  243. struct io_fixed_install *ifi;
  244. int ret;
  245. ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
  246. ret = receive_fd(req->file, NULL, ifi->o_flags);
  247. if (ret < 0)
  248. req_set_fail(req);
  249. io_req_set_res(req, ret, 0);
  250. return IOU_OK;
  251. }