timeout.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <trace/events/io_uring.h>
#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"
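
/*
 * Per-request timeout state, stored in the io_kiocb command area via
 * io_kiocb_to_cmd(). Used for both regular and linked timeouts.
 */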
struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	u32				repeats;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};
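
/*
 * A timeout is "noseq" if it doesn't depend on the CQE count: either it has
 * no completion-event offset (off == 0) or it is a multishot timeout. Such
 * entries are never killed by io_flush_timeouts().
 */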
static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}
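
/*
 * Decide whether a fired timeout is done. Single-shot timeouts always finish;
 * multishot timeouts keep going while they are unlimited (off == 0) or still
 * have repeats remaining.
 */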
static inline bool io_timeout_finish(struct io_timeout *timeout,
				     struct io_timeout_data *data)
{
	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
		return true;

	if (!timeout->off || (timeout->repeats && --timeout->repeats))
		return false;

	return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);
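
/*
 * Task-work completion for a fired timeout. A multishot timeout that isn't
 * finished posts a CQE with IORING_CQE_F_MORE and re-arms its timer instead
 * of completing the request.
 */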
static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
			/* re-arm timer */
			spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			data->timer.function = io_timeout_fn;
			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
			spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}

	io_req_task_complete(req, ts);
}
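
/*
 * Cancel a pending timeout: if the hrtimer could be cancelled (or wasn't
 * queued), account it in cq_timeouts, drop it from the timeout list and queue
 * completion with @status. Returns false if the timer is already firing.
 */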
static bool io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		if (status)
			req_set_fail(req);
		atomic_set(&req->ctx->cq_timeouts,
			   atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&timeout->list);
		io_req_queue_tw_complete(req, status);
		return true;
	}
	return false;
}
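
/*
 * Walk the sequenced timeouts and complete those whose target CQE count has
 * been reached. The list is sorted by target sequence, so the walk stops at
 * the first entry that still needs more completions (or at a noseq entry).
 */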
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;
	struct io_timeout *timeout, *tmp;

	spin_lock_irq(&ctx->timeout_lock);
	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
	spin_unlock_irq(&ctx->timeout_lock);
}
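
/*
 * Fail the remainder of a link chain: each request completes with -ECANCELED
 * unless it already carries its own failure result.
 */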
static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
{
	io_tw_lock(link->ctx, ts);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete(link, ts);
		link = nxt;
	}
}

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}
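
/*
 * Disarm any linked timeout attached to @req: a not-yet-armed one is removed
 * from the link and cancelled directly, an armed one has its timer cancelled
 * under timeout_lock. If @req failed and isn't a hardlink, the rest of the
 * chain is failed as well.
 */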
void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}

struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}
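
/*
 * hrtimer callback for a regular timeout: remove it from the timeout list,
 * account it, and punt the -ETIME completion to task work.
 */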
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		   atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}
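
/*
 * Find a pending timeout matching the cancel criteria, cancel its timer and
 * take it off the list. Returns -ENOENT if nothing matched, -EALREADY if the
 * timer is already firing.
 */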
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (io_cancel_req_match(tmp, cd)) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}
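
/*
 * Task work for an expired linked timeout: try to cancel the request it was
 * guarding and complete the timeout with the cancellation error, or -ETIME if
 * the cancel succeeded or the target had already completed.
 */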
static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret = -ENOENT;

	if (prev) {
		if (!(req->task->flags & PF_EXITING)) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req->task->io_uring, &cd, 0);
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, ts);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, ts);
	}
}
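
/*
 * hrtimer callback for a linked timeout: detach it from the request it guards
 * (grabbing a reference to that request) and punt the cancellation work to
 * task context.
 */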
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}
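
/*
 * Re-arm an armed linked timeout identified by @user_data with a new
 * expiration, keeping io_link_timeout_fn as the callback.
 */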
static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	data->ts = *ts;

	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), mode);
	return 0;
}
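
/*
 * Prepare IORING_OP_TIMEOUT_REMOVE: validate the flags and, for updates, read
 * the new timespec from sqe->addr2.
 */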
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
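
/*
 * Common prep for timeout and linked timeout requests: validate sqe fields
 * and flags, allocate async data, read the timespec and set up the hrtimer.
 * For a linked timeout, attach it to the last request of the submission link.
 */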
static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS |
		      IORING_TIMEOUT_MULTISHOT))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;
	/* multishot requests only make sense with rel values */
	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;
	/*
	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
	 * remaining nr
	 */
	timeout->repeats = 0;
	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
		timeout->repeats = off;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}
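
/*
 * Issue a timeout request: compute the target CQE sequence (unless noseq),
 * insert it into the sorted timeout list and start the timer.
 */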
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events that need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}
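
/*
 * Arm a linked timeout once its target request has been issued. If the target
 * already completed (the back reference was cleared), the timer is never
 * started. Drops the submission reference in either case.
 */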
void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to setup the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}
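
/*
 * Check whether @head's link chain should be cancelled on behalf of @task:
 * either everything is being cancelled, or the chain has inflight requests.
 */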
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&head->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	int canceled = 0;

	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock to keep the lock ordering.
	 */
	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tsk, cancel_all) &&
		    io_kill_timeout(req, -ECANCELED))
			canceled++;
	}
	spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);
	return canceled != 0;
}