cancel.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"
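
/* IORING_OP_ASYNC_CANCEL request state, unpacked from the SQE at prep time */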
struct io_cancel {
	struct file	*file;
	u64		addr;
	u32		flags;
	s32		fd;
	u8		opcode;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)

/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (io_cancel_match_sequence(req, cd->seq))
			return false;
	}

	return true;
}
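
/* io-wq match callback: check one pending work item against the cancel criteria */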
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}
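
/*
 * Try to cancel a matching request on one task's io-wq. Returns 0 if it was
 * cancelled before it started running, -EALREADY if it is already executing,
 * or -ENOENT if nothing matched (or the task has no io-wq).
 */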
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}
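
/*
 * Attempt cancellation everywhere a matching request may be pending: io-wq,
 * poll, waitid, futex and, unless matching by fd, the timeout list.
 */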
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll request
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}
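
/*
 * Prep for IORING_OP_ASYNC_CANCEL: validate the SQE and unpack the match
 * criteria (target user_data, cancel flags, and optionally fd and opcode).
 */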
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}
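
/*
 * Core async cancel: keep cancelling via the issuing task's context until
 * nothing more matches, then fall back to walking every io-wq attached to
 * the ring. With ALL/ANY set, returns the number of requests cancelled;
 * otherwise returns the result for the first match, or -ENOENT if none
 * was found.
 */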
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		ret = io_async_cancel_one(node->task->io_uring, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}
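
/*
 * Issue handler for IORING_OP_ASYNC_CANCEL. If matching by fd, resolve the
 * target file (fixed or normal) first, then run the cancelation.
 */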
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
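
/* Initialize each bucket (spinlock and hash list head) of a cancel hash table. */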
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}
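
/*
 * One synchronous cancel pass for io_sync_cancel(). A fixed target file is
 * looked up here on every call, because the uring_lock is dropped between
 * retries.
 */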
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		cd->file = io_file_from_index(&ctx->file_table, fd);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}
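
/*
 * Handle the IORING_REGISTER_SYNC_CANCEL registration op: cancel matching
 * requests and, if any are still running, keep retrying (waiting on CQ
 * wakeups, up to an optional timeout) until they have completed.
 */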
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time a
	 * request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}