tctx.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"

static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
                                        struct task_struct *task)
{
        struct io_wq_hash *hash;
        struct io_wq_data data;
        unsigned int concurrency;

        mutex_lock(&ctx->uring_lock);
        hash = ctx->hash_map;
        if (!hash) {
                hash = kzalloc(sizeof(*hash), GFP_KERNEL);
                if (!hash) {
                        mutex_unlock(&ctx->uring_lock);
                        return ERR_PTR(-ENOMEM);
                }
                refcount_set(&hash->refs, 1);
                init_waitqueue_head(&hash->wait);
                ctx->hash_map = hash;
        }
        mutex_unlock(&ctx->uring_lock);

        data.hash = hash;
        data.task = task;
        data.free_work = io_wq_free_work;
        data.do_work = io_wq_submit_work;

        /* Do QD, or 4 * CPUS, whichever is smaller */
        concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

        return io_wq_create(concurrency, &data);
}

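/*
 * Worked example (illustrative, not part of the source): with an SQ ring
 * of 128 entries on an 8-CPU machine, the cap above comes out as
 * concurrency = min(128, 4 * 8) = 32, i.e. io-wq is created with at most
 * 32 concurrent workers for this ring.
 */
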
void __io_uring_free(struct task_struct *tsk)
{
        struct io_uring_task *tctx = tsk->io_uring;
        struct io_tctx_node *node;
        unsigned long index;

        /*
         * Fault injection forcing allocation errors in the xa_store() path
         * can lead to xa_empty() returning false, even though no actual
         * node is stored in the xarray. Until that gets sorted out, attempt
         * an iteration here and warn if any entries are found.
         */
        xa_for_each(&tctx->xa, index, node) {
                WARN_ON_ONCE(1);
                break;
        }
        WARN_ON_ONCE(tctx->io_wq);
        WARN_ON_ONCE(tctx->cached_refs);

        percpu_counter_destroy(&tctx->inflight);
        kfree(tctx);
        tsk->io_uring = NULL;
}

__cold int io_uring_alloc_task_context(struct task_struct *task,
                                       struct io_ring_ctx *ctx)
{
        struct io_uring_task *tctx;
        int ret;

        tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
        if (unlikely(!tctx))
                return -ENOMEM;

        ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
        if (unlikely(ret)) {
                kfree(tctx);
                return ret;
        }

        tctx->io_wq = io_init_wq_offload(ctx, task);
        if (IS_ERR(tctx->io_wq)) {
                ret = PTR_ERR(tctx->io_wq);
                percpu_counter_destroy(&tctx->inflight);
                kfree(tctx);
                return ret;
        }

        xa_init(&tctx->xa);
        init_waitqueue_head(&tctx->wait);
        atomic_set(&tctx->in_cancel, 0);
        atomic_set(&tctx->inflight_tracked, 0);
        task->io_uring = tctx;
        init_llist_head(&tctx->task_list);
        init_task_work(&tctx->task_work, tctx_task_work);
        return 0;
}

int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
        struct io_uring_task *tctx = current->io_uring;
        struct io_tctx_node *node;
        int ret;

        if (unlikely(!tctx)) {
                ret = io_uring_alloc_task_context(current, ctx);
                if (unlikely(ret))
                        return ret;

                tctx = current->io_uring;
                if (ctx->iowq_limits_set) {
                        unsigned int limits[2] = { ctx->iowq_limits[0],
                                                   ctx->iowq_limits[1], };

                        ret = io_wq_max_workers(tctx->io_wq, limits);
                        if (ret)
                                return ret;
                }
        }
        if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
                node = kmalloc(sizeof(*node), GFP_KERNEL);
                if (!node)
                        return -ENOMEM;
                node->ctx = ctx;
                node->task = current;

                ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
                                      node, GFP_KERNEL));
                if (ret) {
                        kfree(node);
                        return ret;
                }

                mutex_lock(&ctx->uring_lock);
                list_add(&node->ctx_node, &ctx->tctx_list);
                mutex_unlock(&ctx->uring_lock);
        }
        return 0;
}

int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
        int ret;

        if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
            && ctx->submitter_task != current)
                return -EEXIST;

        ret = __io_uring_add_tctx_node(ctx);
        if (ret)
                return ret;

        current->io_uring->last = ctx;
        return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
        struct io_uring_task *tctx = current->io_uring;
        struct io_tctx_node *node;

        if (!tctx)
                return;
        node = xa_erase(&tctx->xa, index);
        if (!node)
                return;

        WARN_ON_ONCE(current != node->task);
        WARN_ON_ONCE(list_empty(&node->ctx_node));

        mutex_lock(&node->ctx->uring_lock);
        list_del(&node->ctx_node);
        mutex_unlock(&node->ctx->uring_lock);

        if (tctx->last == node->ctx)
                tctx->last = NULL;
        kfree(node);
}

__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
        struct io_wq *wq = tctx->io_wq;
        struct io_tctx_node *node;
        unsigned long index;

        xa_for_each(&tctx->xa, index, node) {
                io_uring_del_tctx_node(index);
                cond_resched();
        }
        if (wq) {
                /*
                 * Must be after io_uring_del_tctx_node() (removes nodes under
                 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
                 */
                io_wq_put_and_exit(wq);
                tctx->io_wq = NULL;
        }
}

void io_uring_unreg_ringfd(void)
{
        struct io_uring_task *tctx = current->io_uring;
        int i;

        for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
                if (tctx->registered_rings[i]) {
                        fput(tctx->registered_rings[i]);
                        tctx->registered_rings[i] = NULL;
                }
        }
}

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
                                int start, int end)
{
        int offset;

        for (offset = start; offset < end; offset++) {
                offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
                if (tctx->registered_rings[offset])
                        continue;

                tctx->registered_rings[offset] = file;
                return offset;
        }
        return -EBUSY;
}

static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
                                     int start, int end)
{
        struct file *file;
        int offset;

        file = fget(fd);
        if (!file) {
                return -EBADF;
        } else if (!io_is_uring_fops(file)) {
                fput(file);
                return -EOPNOTSUPP;
        }

        offset = io_ring_add_registered_file(tctx, file, start, end);
        if (offset < 0)
                fput(file);

        return offset;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
                       unsigned nr_args)
{
        struct io_uring_rsrc_update __user *arg = __arg;
        struct io_uring_rsrc_update reg;
        struct io_uring_task *tctx;
        int ret, i;

        if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
                return -EINVAL;

        mutex_unlock(&ctx->uring_lock);
        ret = __io_uring_add_tctx_node(ctx);
        mutex_lock(&ctx->uring_lock);
        if (ret)
                return ret;

        tctx = current->io_uring;
        for (i = 0; i < nr_args; i++) {
                int start, end;

                if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
                        ret = -EFAULT;
                        break;
                }

                if (reg.resv) {
                        ret = -EINVAL;
                        break;
                }

                if (reg.offset == -1U) {
                        start = 0;
                        end = IO_RINGFD_REG_MAX;
                } else {
                        if (reg.offset >= IO_RINGFD_REG_MAX) {
                                ret = -EINVAL;
                                break;
                        }
                        start = reg.offset;
                        end = start + 1;
                }

                ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
                if (ret < 0)
                        break;

                reg.offset = ret;
                if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
                        fput(tctx->registered_rings[reg.offset]);
                        tctx->registered_rings[reg.offset] = NULL;
                        ret = -EFAULT;
                        break;
                }
        }

        return i ? i : ret;
}

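/*
 * Illustrative userspace sketch (not part of this file): registering a
 * ring fd through io_uring_register(2) with IORING_REGISTER_RING_FDS,
 * letting the kernel pick a free slot via ->offset == -1U as described
 * in the comment above. The raw syscall is used here; liburing wraps
 * this as io_uring_register_ring_fd(). The returned index is then passed
 * to io_uring_enter(2) together with IORING_ENTER_REGISTERED_RING in
 * place of the real fd.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int register_ring_fd(int ring_fd)
{
        struct io_uring_rsrc_update reg = {
                .offset = -1U,          /* let the kernel find a free index */
                .data   = ring_fd,      /* the ring fd to register */
        };
        int ret;

        ret = syscall(__NR_io_uring_register, ring_fd,
                      IORING_REGISTER_RING_FDS, &reg, 1);
        if (ret < 0)
                return -1;
        /* kernel wrote the chosen slot back into reg.offset */
        return reg.offset;
}
#endif
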
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
                         unsigned nr_args)
{
        struct io_uring_rsrc_update __user *arg = __arg;
        struct io_uring_task *tctx = current->io_uring;
        struct io_uring_rsrc_update reg;
        int ret = 0, i;

        if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
                return -EINVAL;
        if (!tctx)
                return 0;

        for (i = 0; i < nr_args; i++) {
                if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
                        ret = -EFAULT;
                        break;
                }
                if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
                        ret = -EINVAL;
                        break;
                }

                reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
                if (tctx->registered_rings[reg.offset]) {
                        fput(tctx->registered_rings[reg.offset]);
                        tctx->registered_rings[reg.offset] = NULL;
                }
        }

        return i ? i : ret;
}
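
/*
 * Illustrative counterpart (not part of this file): unregistering a slot
 * with IORING_UNREGISTER_RING_FDS. Per the validation above, ->data and
 * ->resv must be zero and ->offset must name the registered slot.
 */
#if 0
static int unregister_ring_fd(int ring_fd, unsigned int index)
{
        struct io_uring_rsrc_update reg = {
                .offset = index,        /* slot returned at registration */
        };

        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_UNREGISTER_RING_FDS, &reg, 1);
}
#endif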