// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct uring_cache *cache;

	cache = io_alloc_cache_get(&ctx->uring_cache);
	if (cache) {
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = cache;
		return cache;
	}
	if (!io_alloc_async_data(req))
		return req->async_data;
	return NULL;
}
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache = req->async_data;

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;
	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct task_struct *task, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->task != task)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}
/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
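
/*
 * Example (a sketch, not part of this file): a driver ->uring_cmd()
 * handler that registers for cancellation and reacts to IO_URING_F_CANCEL.
 * The my_drv_*() helpers are hypothetical stand-ins for driver internals.
 *
 *	static int my_drv_uring_cmd(struct io_uring_cmd *cmd,
 *				    unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			// Abort the in-flight operation. This may race
 *			// with normal completion; the driver must make
 *			// sure the command completes exactly once.
 *			my_drv_abort(cmd);
 *			return 0;
 *		}
 *
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		my_drv_submit(cmd);
 *		return -EIOCBQUEUED;	// completed later via io_uring_cmd_done()
 *	}
 */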
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (io_should_terminate_tw(req->ctx))
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
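
/*
 * Example (a sketch): deferring the final completion from IRQ context to
 * task context through the task-work helpers built on top of
 * __io_uring_cmd_do_in_task(). my_drv_irq() and my_drv_result() are
 * hypothetical driver internals.
 *
 *	static void my_drv_cmd_tw_cb(struct io_uring_cmd *cmd,
 *				     unsigned int issue_flags)
 *	{
 *		// Runs in task context; issue_flags come from
 *		// io_uring_cmd_work() above.
 *		io_uring_cmd_done(cmd, my_drv_result(cmd), 0, issue_flags);
 *	}
 *
 *	static void my_drv_irq(struct io_uring_cmd *cmd)
 *	{
 *		// IRQ context: punt the completion to the task.
 *		io_uring_cmd_complete_in_task(cmd, my_drv_cmd_tw_cb);
 *	}
 */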
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}
/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
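
/*
 * Example (a sketch): the -EIOCBQUEUED contract described above. A handler
 * that finishes inline returns its result directly; one that queues work
 * returns -EIOCBQUEUED and completes later via io_uring_cmd_done(). The
 * my_drv_*() helpers are hypothetical.
 *
 *	static int my_drv_uring_cmd(struct io_uring_cmd *cmd,
 *				    unsigned int issue_flags)
 *	{
 *		if (my_drv_can_finish_inline(cmd))
 *			return my_drv_do_sync(cmd);	// result posted now
 *
 *		my_drv_queue(cmd);
 *		return -EIOCBQUEUED;	// io_uring_cmd_done() runs later
 *	}
 */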
static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache;

	cache = io_uring_async_get(req);
	if (unlikely(!cache))
		return -ENOMEM;

	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
		/* defer memcpy until we need it */
		ioucmd->sqe = sqe;
		return 0;
	}

	memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}
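
/*
 * Example (a sketch): how userspace fills an SQE for IORING_OP_URING_CMD,
 * matching the fields parsed above. Shown against liburing's
 * io_uring_get_sqe(); MY_CMD_OP and the payload layout are hypothetical
 * and defined by the target driver.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = dev_fd;
 *	sqe->cmd_op = MY_CMD_OP;	// read back via READ_ONCE(sqe->cmd_op)
 *	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
 *	sqe->buf_index = 0;		// slot in the registered buffer table
 *	// the driver-private payload lives in sqe->cmd[]: 16 bytes, or 80
 *	// with IORING_SETUP_SQE128
 */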
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		struct uring_cache *cache = req->async_data;
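
		/*
		 * Prep deferred the SQE copy (see io_uring_cmd_prep_setup());
		 * stash the SQE into the cache now so the retry still sees
		 * the command after the original SQE slot is recycled.
		 */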
		if (ioucmd->sqe != (void *) cache)
			memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
		return -EAGAIN;
	} else if (ret == -EIOCBQUEUED) {
		return -EIOCBQUEUED;
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
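
/*
 * Example (a sketch): a driver resolving the user address from its command
 * payload through the buffer registered via IORING_URING_CMD_FIXED. The
 * struct my_cmd payload layout is hypothetical; the transfer direction is
 * READ or WRITE.
 *
 *	const struct my_cmd *mc = io_uring_sqe_cmd(ioucmd->sqe);
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(mc->addr, mc->len, WRITE,
 *					&iter, ioucmd);
 *	if (ret < 0)
 *		return ret;
 *	// iter now walks the pinned, pre-registered pages
 */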
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}
#if defined(CONFIG_NET)
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}
static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
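
/*
 * Example (a sketch): issuing SOCKET_URING_OP_SIOCINQ from userspace with
 * raw SQE fields; on success the CQE res carries the unread byte count.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = sock_fd;
 *	sqe->cmd_op = SOCKET_URING_OP_SIOCINQ;
 *	io_uring_submit(&ring);
 *	// cqe->res: >= 0 is bytes queued in the receive buffer, < 0 is -errno
 */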