io_uring.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Shared application/kernel submission and completion ring pairs, for
  4. * supporting fast/efficient IO.
  5. *
  6. * A note on the read/write ordering memory barriers that are matched between
  7. * the application and kernel side.
  8. *
  9. * After the application reads the CQ ring tail, it must use an
  10. * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
  11. * before writing the tail (using smp_load_acquire to read the tail will
  12. * do). It also needs a smp_mb() before updating CQ head (ordering the
  13. * entry load(s) with the head store), pairing with an implicit barrier
  14. * through a control-dependency in io_get_cqe (smp_store_release to
  15. * store head will do). Failure to do so could lead to reading invalid
  16. * CQ entries.
  17. *
  18. * Likewise, the application must use an appropriate smp_wmb() before
  19. * writing the SQ tail (ordering SQ entry stores with the tail store),
  20. * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
  21. * to store the tail will do). And it needs a barrier ordering the SQ
  22. * head load before writing new SQ entries (smp_load_acquire to read
  23. * head will do).
  24. *
  25. * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
  26. * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
  27. * updating the SQ tail; a full memory barrier smp_mb() is needed
  28. * between.
  29. *
  30. * Also see the examples in the liburing library:
  31. *
  32. * git://git.kernel.dk/liburing
  33. *
  34. * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
  35. * from data shared between the kernel and application. This is done both
  36. * for ordering purposes and to ensure that once a value is loaded from
  37. * data that the application could potentially modify, it remains stable.
  38. *
  39. * Copyright (C) 2018-2019 Jens Axboe
  40. * Copyright (c) 2018-2019 Christoph Hellwig
  41. */
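/*
 * Illustrative sketch of the userspace side of the pairing described above,
 * assuming liburing-style raw access to the mapped CQ ring fields (khead,
 * ktail, kring_mask, cqes); handle_cqe() is a hypothetical callback.
 * Applications would normally just use liburing's helpers instead.
 *
 *	unsigned head = *cq->khead;
 *	unsigned tail = __atomic_load_n(cq->ktail, __ATOMIC_ACQUIRE);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	(the release store below orders the CQE loads before publishing head)
 *	__atomic_store_n(cq->khead, head, __ATOMIC_RELEASE);
 */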
  42. #include <linux/kernel.h>
  43. #include <linux/init.h>
  44. #include <linux/errno.h>
  45. #include <linux/syscalls.h>
  46. #include <net/compat.h>
  47. #include <linux/refcount.h>
  48. #include <linux/uio.h>
  49. #include <linux/bits.h>
  50. #include <linux/sched/signal.h>
  51. #include <linux/fs.h>
  52. #include <linux/file.h>
  53. #include <linux/fdtable.h>
  54. #include <linux/mm.h>
  55. #include <linux/mman.h>
  56. #include <linux/percpu.h>
  57. #include <linux/slab.h>
  58. #include <linux/bvec.h>
  59. #include <linux/net.h>
  60. #include <net/sock.h>
  61. #include <linux/anon_inodes.h>
  62. #include <linux/sched/mm.h>
  63. #include <linux/uaccess.h>
  64. #include <linux/nospec.h>
  65. #include <linux/fsnotify.h>
  66. #include <linux/fadvise.h>
  67. #include <linux/task_work.h>
  68. #include <linux/io_uring.h>
  69. #include <linux/io_uring/cmd.h>
  70. #include <linux/audit.h>
  71. #include <linux/security.h>
  72. #include <asm/shmparam.h>
  73. #define CREATE_TRACE_POINTS
  74. #include <trace/events/io_uring.h>
  75. #include <uapi/linux/io_uring.h>
  76. #include "io-wq.h"
  77. #include "io_uring.h"
  78. #include "opdef.h"
  79. #include "refs.h"
  80. #include "tctx.h"
  81. #include "register.h"
  82. #include "sqpoll.h"
  83. #include "fdinfo.h"
  84. #include "kbuf.h"
  85. #include "rsrc.h"
  86. #include "cancel.h"
  87. #include "net.h"
  88. #include "notif.h"
  89. #include "waitid.h"
  90. #include "futex.h"
  91. #include "napi.h"
  92. #include "uring_cmd.h"
  93. #include "msg_ring.h"
  94. #include "memmap.h"
  95. #include "timeout.h"
  96. #include "poll.h"
  97. #include "rw.h"
  98. #include "alloc_cache.h"
  99. #include "eventfd.h"
  100. #define IORING_MAX_ENTRIES 32768
  101. #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
  102. #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
  103. IOSQE_IO_HARDLINK | IOSQE_ASYNC)
  104. #define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
  105. IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
  106. #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
  107. REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
  108. REQ_F_ASYNC_DATA)
  109. #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
  110. IO_REQ_CLEAN_FLAGS)
  111. #define IO_TCTX_REFS_CACHE_NR (1U << 10)
  112. #define IO_COMPL_BATCH 32
  113. #define IO_REQ_ALLOC_BATCH 8
  114. struct io_defer_entry {
  115. struct list_head list;
  116. struct io_kiocb *req;
  117. u32 seq;
  118. };
  119. /* requests with any of those set should undergo io_disarm_next() */
  120. #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
  121. #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
  122. /*
  123. * No waiters. It's larger than any valid value of the tw counter
  124. * so that tests against ->cq_wait_nr would fail and skip wake_up().
  125. */
  126. #define IO_CQ_WAKE_INIT (-1U)
  127. /* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
  128. #define IO_CQ_WAKE_FORCE (IO_CQ_WAKE_INIT >> 1)
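/*
 * Worked example for the two constants above (illustrative note):
 * IO_CQ_WAKE_INIT is -1U == 0xffffffff and IO_CQ_WAKE_FORCE is 0x7fffffff,
 * both far above IORING_MAX_CQ_ENTRIES. With no waiters, ->cq_wait_nr holds
 * INIT, so any real tw count (and even FORCE) compares below it and the
 * wake_up() is skipped; once a waiter stores its small target, FORCE
 * compares above it and forces the wake.
 */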
  129. static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
  130. struct task_struct *task,
  131. bool cancel_all);
  132. static void io_queue_sqe(struct io_kiocb *req);
  133. struct kmem_cache *req_cachep;
  134. static struct workqueue_struct *iou_wq __ro_after_init;
  135. static int __read_mostly sysctl_io_uring_disabled;
  136. static int __read_mostly sysctl_io_uring_group = -1;
  137. #ifdef CONFIG_SYSCTL
  138. static struct ctl_table kernel_io_uring_disabled_table[] = {
  139. {
  140. .procname = "io_uring_disabled",
  141. .data = &sysctl_io_uring_disabled,
  142. .maxlen = sizeof(sysctl_io_uring_disabled),
  143. .mode = 0644,
  144. .proc_handler = proc_dointvec_minmax,
  145. .extra1 = SYSCTL_ZERO,
  146. .extra2 = SYSCTL_TWO,
  147. },
  148. {
  149. .procname = "io_uring_group",
  150. .data = &sysctl_io_uring_group,
  151. .maxlen = sizeof(gid_t),
  152. .mode = 0644,
  153. .proc_handler = proc_dointvec,
  154. },
  155. };
  156. #endif
  157. static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
  158. {
  159. return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
  160. }
  161. static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
  162. {
  163. return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
  164. }
  165. static bool io_match_linked(struct io_kiocb *head)
  166. {
  167. struct io_kiocb *req;
  168. io_for_each_link(req, head) {
  169. if (req->flags & REQ_F_INFLIGHT)
  170. return true;
  171. }
  172. return false;
  173. }
  174. /*
  175. * As io_match_task() but protected against racing with linked timeouts.
  176. * User must not hold timeout_lock.
  177. */
  178. bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
  179. bool cancel_all)
  180. {
  181. bool matched;
  182. if (task && head->task != task)
  183. return false;
  184. if (cancel_all)
  185. return true;
  186. if (head->flags & REQ_F_LINK_TIMEOUT) {
  187. struct io_ring_ctx *ctx = head->ctx;
  188. /* protect against races with linked timeouts */
  189. spin_lock_irq(&ctx->timeout_lock);
  190. matched = io_match_linked(head);
  191. spin_unlock_irq(&ctx->timeout_lock);
  192. } else {
  193. matched = io_match_linked(head);
  194. }
  195. return matched;
  196. }
  197. static inline void req_fail_link_node(struct io_kiocb *req, int res)
  198. {
  199. req_set_fail(req);
  200. io_req_set_res(req, res, 0);
  201. }
  202. static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
  203. {
  204. wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
  205. }
  206. static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
  207. {
  208. struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
  209. complete(&ctx->ref_comp);
  210. }
  211. static __cold void io_fallback_req_func(struct work_struct *work)
  212. {
  213. struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
  214. fallback_work.work);
  215. struct llist_node *node = llist_del_all(&ctx->fallback_llist);
  216. struct io_kiocb *req, *tmp;
  217. struct io_tw_state ts = {};
  218. percpu_ref_get(&ctx->refs);
  219. mutex_lock(&ctx->uring_lock);
  220. llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
  221. req->io_task_work.func(req, &ts);
  222. io_submit_flush_completions(ctx);
  223. mutex_unlock(&ctx->uring_lock);
  224. percpu_ref_put(&ctx->refs);
  225. }
  226. static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
  227. {
  228. unsigned hash_buckets = 1U << bits;
  229. size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
  230. table->hbs = kmalloc(hash_size, GFP_KERNEL);
  231. if (!table->hbs)
  232. return -ENOMEM;
  233. table->hash_bits = bits;
  234. init_hash_table(table, hash_buckets);
  235. return 0;
  236. }
  237. static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
  238. {
  239. struct io_ring_ctx *ctx;
  240. int hash_bits;
  241. bool ret;
  242. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  243. if (!ctx)
  244. return NULL;
  245. xa_init(&ctx->io_bl_xa);
  246. /*
  247. * Use 5 bits less than the max cq entries; that should give us around
  248. * 32 entries per hash list if totally full and uniformly spread, but
  249. * don't keep too many buckets so we don't overconsume memory.
  250. */
  251. hash_bits = ilog2(p->cq_entries) - 5;
  252. hash_bits = clamp(hash_bits, 1, 8);
  253. if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
  254. goto err;
  255. if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
  256. goto err;
  257. if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
  258. 0, GFP_KERNEL))
  259. goto err;
  260. ctx->flags = p->flags;
  261. atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
  262. init_waitqueue_head(&ctx->sqo_sq_wait);
  263. INIT_LIST_HEAD(&ctx->sqd_list);
  264. INIT_LIST_HEAD(&ctx->cq_overflow_list);
  265. INIT_LIST_HEAD(&ctx->io_buffers_cache);
  266. ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
  267. sizeof(struct io_rsrc_node));
  268. ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
  269. sizeof(struct async_poll));
  270. ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
  271. sizeof(struct io_async_msghdr));
  272. ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
  273. sizeof(struct io_async_rw));
  274. ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
  275. sizeof(struct uring_cache));
  276. ret |= io_futex_cache_init(ctx);
  277. if (ret)
  278. goto free_ref;
  279. init_completion(&ctx->ref_comp);
  280. xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
  281. mutex_init(&ctx->uring_lock);
  282. init_waitqueue_head(&ctx->cq_wait);
  283. init_waitqueue_head(&ctx->poll_wq);
  284. init_waitqueue_head(&ctx->rsrc_quiesce_wq);
  285. spin_lock_init(&ctx->completion_lock);
  286. spin_lock_init(&ctx->timeout_lock);
  287. INIT_WQ_LIST(&ctx->iopoll_list);
  288. INIT_LIST_HEAD(&ctx->io_buffers_comp);
  289. INIT_LIST_HEAD(&ctx->defer_list);
  290. INIT_LIST_HEAD(&ctx->timeout_list);
  291. INIT_LIST_HEAD(&ctx->ltimeout_list);
  292. INIT_LIST_HEAD(&ctx->rsrc_ref_list);
  293. init_llist_head(&ctx->work_llist);
  294. INIT_LIST_HEAD(&ctx->tctx_list);
  295. ctx->submit_state.free_list.next = NULL;
  296. INIT_HLIST_HEAD(&ctx->waitid_list);
  297. #ifdef CONFIG_FUTEX
  298. INIT_HLIST_HEAD(&ctx->futex_list);
  299. #endif
  300. INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
  301. INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
  302. INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
  303. io_napi_init(ctx);
  304. return ctx;
  305. free_ref:
  306. percpu_ref_exit(&ctx->refs);
  307. err:
  308. io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
  309. io_alloc_cache_free(&ctx->apoll_cache, kfree);
  310. io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
  311. io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
  312. io_alloc_cache_free(&ctx->uring_cache, kfree);
  313. io_futex_cache_free(ctx);
  314. kfree(ctx->cancel_table.hbs);
  315. kfree(ctx->cancel_table_locked.hbs);
  316. xa_destroy(&ctx->io_bl_xa);
  317. kfree(ctx);
  318. return NULL;
  319. }
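/*
 * Sizing example for the cancel hash above (illustrative): with
 * p->cq_entries == 4096, ilog2(4096) == 12, so hash_bits == 12 - 5 == 7
 * after clamping to [1, 8]. That yields 1 << 7 == 128 buckets, i.e. about
 * 4096 / 128 == 32 entries per hash list if the table were completely full
 * and uniformly spread, as the comment in io_ring_ctx_alloc() aims for.
 */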
  320. static void io_account_cq_overflow(struct io_ring_ctx *ctx)
  321. {
  322. struct io_rings *r = ctx->rings;
  323. WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
  324. ctx->cq_extra--;
  325. }
  326. static bool req_need_defer(struct io_kiocb *req, u32 seq)
  327. {
  328. if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
  329. struct io_ring_ctx *ctx = req->ctx;
  330. return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
  331. }
  332. return false;
  333. }
  334. static void io_clean_op(struct io_kiocb *req)
  335. {
  336. if (req->flags & REQ_F_BUFFER_SELECTED) {
  337. spin_lock(&req->ctx->completion_lock);
  338. io_kbuf_drop(req);
  339. spin_unlock(&req->ctx->completion_lock);
  340. }
  341. if (req->flags & REQ_F_NEED_CLEANUP) {
  342. const struct io_cold_def *def = &io_cold_defs[req->opcode];
  343. if (def->cleanup)
  344. def->cleanup(req);
  345. }
  346. if ((req->flags & REQ_F_POLLED) && req->apoll) {
  347. kfree(req->apoll->double_poll);
  348. kfree(req->apoll);
  349. req->apoll = NULL;
  350. }
  351. if (req->flags & REQ_F_INFLIGHT) {
  352. struct io_uring_task *tctx = req->task->io_uring;
  353. atomic_dec(&tctx->inflight_tracked);
  354. }
  355. if (req->flags & REQ_F_CREDS)
  356. put_cred(req->creds);
  357. if (req->flags & REQ_F_ASYNC_DATA) {
  358. kfree(req->async_data);
  359. req->async_data = NULL;
  360. }
  361. req->flags &= ~IO_REQ_CLEAN_FLAGS;
  362. }
  363. static inline void io_req_track_inflight(struct io_kiocb *req)
  364. {
  365. if (!(req->flags & REQ_F_INFLIGHT)) {
  366. req->flags |= REQ_F_INFLIGHT;
  367. atomic_inc(&req->task->io_uring->inflight_tracked);
  368. }
  369. }
  370. static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
  371. {
  372. if (WARN_ON_ONCE(!req->link))
  373. return NULL;
  374. req->flags &= ~REQ_F_ARM_LTIMEOUT;
  375. req->flags |= REQ_F_LINK_TIMEOUT;
  376. /* linked timeouts should have two refs once prep'ed */
  377. io_req_set_refcount(req);
  378. __io_req_set_refcount(req->link, 2);
  379. return req->link;
  380. }
  381. static void io_prep_async_work(struct io_kiocb *req)
  382. {
  383. const struct io_issue_def *def = &io_issue_defs[req->opcode];
  384. struct io_ring_ctx *ctx = req->ctx;
  385. if (!(req->flags & REQ_F_CREDS)) {
  386. req->flags |= REQ_F_CREDS;
  387. req->creds = get_current_cred();
  388. }
  389. req->work.list.next = NULL;
  390. atomic_set(&req->work.flags, 0);
  391. if (req->flags & REQ_F_FORCE_ASYNC)
  392. atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);
  393. if (req->file && !(req->flags & REQ_F_FIXED_FILE))
  394. req->flags |= io_file_get_flags(req->file);
  395. if (req->file && (req->flags & REQ_F_ISREG)) {
  396. bool should_hash = def->hash_reg_file;
  397. /* don't serialize this request if the fs doesn't need it */
  398. if (should_hash && (req->file->f_flags & O_DIRECT) &&
  399. (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE))
  400. should_hash = false;
  401. if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
  402. io_wq_hash_work(&req->work, file_inode(req->file));
  403. } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
  404. if (def->unbound_nonreg_file)
  405. atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
  406. }
  407. }
  408. static void io_prep_async_link(struct io_kiocb *req)
  409. {
  410. struct io_kiocb *cur;
  411. if (req->flags & REQ_F_LINK_TIMEOUT) {
  412. struct io_ring_ctx *ctx = req->ctx;
  413. spin_lock_irq(&ctx->timeout_lock);
  414. io_for_each_link(cur, req)
  415. io_prep_async_work(cur);
  416. spin_unlock_irq(&ctx->timeout_lock);
  417. } else {
  418. io_for_each_link(cur, req)
  419. io_prep_async_work(cur);
  420. }
  421. }
  422. static void io_queue_iowq(struct io_kiocb *req)
  423. {
  424. struct io_uring_task *tctx = req->task->io_uring;
  425. BUG_ON(!tctx);
  426. if ((current->flags & PF_KTHREAD) || !tctx->io_wq) {
  427. io_req_task_queue_fail(req, -ECANCELED);
  428. return;
  429. }
  430. /* init ->work of the whole link before punting */
  431. io_prep_async_link(req);
  432. /*
  433. * Not expected to happen, but if we do have a bug where this _can_
  434. * happen, catch it here and ensure the request is marked as
  435. * canceled. That will make io-wq go through the usual work cancel
  436. * procedure rather than attempt to run this request (or create a new
  437. * worker for it).
  438. */
  439. if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
  440. atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
  441. trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
  442. io_wq_enqueue(tctx->io_wq, &req->work);
  443. }
  444. static void io_req_queue_iowq_tw(struct io_kiocb *req, struct io_tw_state *ts)
  445. {
  446. io_queue_iowq(req);
  447. }
  448. void io_req_queue_iowq(struct io_kiocb *req)
  449. {
  450. req->io_task_work.func = io_req_queue_iowq_tw;
  451. io_req_task_work_add(req);
  452. }
  453. static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
  454. {
  455. while (!list_empty(&ctx->defer_list)) {
  456. struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
  457. struct io_defer_entry, list);
  458. if (req_need_defer(de->req, de->seq))
  459. break;
  460. list_del_init(&de->list);
  461. io_req_task_queue(de->req);
  462. kfree(de);
  463. }
  464. }
  465. void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
  466. {
  467. if (ctx->poll_activated)
  468. io_poll_wq_wake(ctx);
  469. if (ctx->off_timeout_used)
  470. io_flush_timeouts(ctx);
  471. if (ctx->drain_active) {
  472. spin_lock(&ctx->completion_lock);
  473. io_queue_deferred(ctx);
  474. spin_unlock(&ctx->completion_lock);
  475. }
  476. if (ctx->has_evfd)
  477. io_eventfd_flush_signal(ctx);
  478. }
  479. static inline void __io_cq_lock(struct io_ring_ctx *ctx)
  480. {
  481. if (!ctx->lockless_cq)
  482. spin_lock(&ctx->completion_lock);
  483. }
  484. static inline void io_cq_lock(struct io_ring_ctx *ctx)
  485. __acquires(ctx->completion_lock)
  486. {
  487. spin_lock(&ctx->completion_lock);
  488. }
  489. static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
  490. {
  491. io_commit_cqring(ctx);
  492. if (!ctx->task_complete) {
  493. if (!ctx->lockless_cq)
  494. spin_unlock(&ctx->completion_lock);
  495. /* IOPOLL rings only need to wake up if it's also SQPOLL */
  496. if (!ctx->syscall_iopoll)
  497. io_cqring_wake(ctx);
  498. }
  499. io_commit_cqring_flush(ctx);
  500. }
  501. static void io_cq_unlock_post(struct io_ring_ctx *ctx)
  502. __releases(ctx->completion_lock)
  503. {
  504. io_commit_cqring(ctx);
  505. spin_unlock(&ctx->completion_lock);
  506. io_cqring_wake(ctx);
  507. io_commit_cqring_flush(ctx);
  508. }
  509. static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
  510. {
  511. size_t cqe_size = sizeof(struct io_uring_cqe);
  512. lockdep_assert_held(&ctx->uring_lock);
  513. /* don't abort if we're dying, entries must get freed */
  514. if (!dying && __io_cqring_events(ctx) == ctx->cq_entries)
  515. return;
  516. if (ctx->flags & IORING_SETUP_CQE32)
  517. cqe_size <<= 1;
  518. io_cq_lock(ctx);
  519. while (!list_empty(&ctx->cq_overflow_list)) {
  520. struct io_uring_cqe *cqe;
  521. struct io_overflow_cqe *ocqe;
  522. ocqe = list_first_entry(&ctx->cq_overflow_list,
  523. struct io_overflow_cqe, list);
  524. if (!dying) {
  525. if (!io_get_cqe_overflow(ctx, &cqe, true))
  526. break;
  527. memcpy(cqe, &ocqe->cqe, cqe_size);
  528. }
  529. list_del(&ocqe->list);
  530. kfree(ocqe);
  531. /*
  532. * For silly syzbot cases that deliberately overflow by huge
  533. * amounts, check if we need to resched and drop and
  534. * reacquire the locks if so. Nothing real would ever hit this.
  535. * Ideally we'd have a non-posting unlock for this, but it's hard to
  536. * justify for a case that never happens in practice.
  537. */
  538. if (need_resched()) {
  539. ctx->cqe_sentinel = ctx->cqe_cached;
  540. io_cq_unlock_post(ctx);
  541. mutex_unlock(&ctx->uring_lock);
  542. cond_resched();
  543. mutex_lock(&ctx->uring_lock);
  544. io_cq_lock(ctx);
  545. }
  546. }
  547. if (list_empty(&ctx->cq_overflow_list)) {
  548. clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
  549. atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
  550. }
  551. io_cq_unlock_post(ctx);
  552. }
  553. static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
  554. {
  555. if (ctx->rings)
  556. __io_cqring_overflow_flush(ctx, true);
  557. }
  558. static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
  559. {
  560. mutex_lock(&ctx->uring_lock);
  561. __io_cqring_overflow_flush(ctx, false);
  562. mutex_unlock(&ctx->uring_lock);
  563. }
  564. /* can be called by any task */
  565. static void io_put_task_remote(struct task_struct *task)
  566. {
  567. struct io_uring_task *tctx = task->io_uring;
  568. percpu_counter_sub(&tctx->inflight, 1);
  569. if (unlikely(atomic_read(&tctx->in_cancel)))
  570. wake_up(&tctx->wait);
  571. put_task_struct(task);
  572. }
  573. /* used by a task to put its own references */
  574. static void io_put_task_local(struct task_struct *task)
  575. {
  576. task->io_uring->cached_refs++;
  577. }
  578. /* must be called somewhat shortly after putting a request */
  579. static inline void io_put_task(struct task_struct *task)
  580. {
  581. if (likely(task == current))
  582. io_put_task_local(task);
  583. else
  584. io_put_task_remote(task);
  585. }
  586. void io_task_refs_refill(struct io_uring_task *tctx)
  587. {
  588. unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
  589. percpu_counter_add(&tctx->inflight, refill);
  590. refcount_add(refill, &current->usage);
  591. tctx->cached_refs += refill;
  592. }
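/*
 * Refill arithmetic example (illustrative): cached_refs can go negative
 * before the refill is triggered. With IO_TCTX_REFS_CACHE_NR == 1024 and,
 * say, cached_refs == -3, refill becomes -(-3) + 1024 == 1027, topping the
 * cache back up to exactly 1024 while charging the same 1027 to the inflight
 * counter and the task's usage refcount in a single batch.
 */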
  593. static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
  594. {
  595. struct io_uring_task *tctx = task->io_uring;
  596. unsigned int refs = tctx->cached_refs;
  597. if (refs) {
  598. tctx->cached_refs = 0;
  599. percpu_counter_sub(&tctx->inflight, refs);
  600. put_task_struct_many(task, refs);
  601. }
  602. }
  603. static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
  604. s32 res, u32 cflags, u64 extra1, u64 extra2)
  605. {
  606. struct io_overflow_cqe *ocqe;
  607. size_t ocq_size = sizeof(struct io_overflow_cqe);
  608. bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
  609. lockdep_assert_held(&ctx->completion_lock);
  610. if (is_cqe32)
  611. ocq_size += sizeof(struct io_uring_cqe);
  612. ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
  613. trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
  614. if (!ocqe) {
  615. /*
  616. * If we're in ring overflow flush mode, or in task cancel mode,
  617. * or cannot allocate an overflow entry, then we need to drop it
  618. * on the floor.
  619. */
  620. io_account_cq_overflow(ctx);
  621. set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
  622. return false;
  623. }
  624. if (list_empty(&ctx->cq_overflow_list)) {
  625. set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
  626. atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
  627. }
  628. ocqe->cqe.user_data = user_data;
  629. ocqe->cqe.res = res;
  630. ocqe->cqe.flags = cflags;
  631. if (is_cqe32) {
  632. ocqe->cqe.big_cqe[0] = extra1;
  633. ocqe->cqe.big_cqe[1] = extra2;
  634. }
  635. list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
  636. return true;
  637. }
  638. static void io_req_cqe_overflow(struct io_kiocb *req)
  639. {
  640. io_cqring_event_overflow(req->ctx, req->cqe.user_data,
  641. req->cqe.res, req->cqe.flags,
  642. req->big_cqe.extra1, req->big_cqe.extra2);
  643. memset(&req->big_cqe, 0, sizeof(req->big_cqe));
  644. }
  645. /*
  646. * writes to the cq entry need to come after reading head; the
  647. * control dependency is enough as we're using WRITE_ONCE to
  648. * fill the cq entry
  649. */
  650. bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
  651. {
  652. struct io_rings *rings = ctx->rings;
  653. unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
  654. unsigned int free, queued, len;
  655. /*
  656. * Posting into the CQ when there are pending overflowed CQEs may break
  657. * ordering guarantees, which will affect links, F_MORE users and more.
  658. * Force overflow the completion.
  659. */
  660. if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
  661. return false;
  663. /* userspace may cheat by modifying the tail, be safe and take the min */
  663. queued = min(__io_cqring_events(ctx), ctx->cq_entries);
  664. free = ctx->cq_entries - queued;
  665. /* we need a contiguous range, limit based on the current array offset */
  666. len = min(free, ctx->cq_entries - off);
  667. if (!len)
  668. return false;
  669. if (ctx->flags & IORING_SETUP_CQE32) {
  670. off <<= 1;
  671. len <<= 1;
  672. }
  673. ctx->cqe_cached = &rings->cqes[off];
  674. ctx->cqe_sentinel = ctx->cqe_cached + len;
  675. return true;
  676. }
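/*
 * Worked example of the range computation above (illustrative): with
 * cq_entries == 8, cached_cq_tail == 6 and cq.head == 3, we get off == 6,
 * queued == 3, free == 5 and len == min(5, 8 - 6) == 2, i.e. the cache only
 * covers the two contiguous slots before the ring wraps; the next refill
 * then starts over at off == 0. For CQE32 rings both off and len are
 * doubled, since each logical CQE occupies two array slots.
 */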
  677. static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
  678. u32 cflags)
  679. {
  680. struct io_uring_cqe *cqe;
  681. ctx->cq_extra++;
  682. /*
  683. * If we can't get a cq entry, userspace overflowed the
  684. * submission (by quite a lot). Increment the overflow count in
  685. * the ring.
  686. */
  687. if (likely(io_get_cqe(ctx, &cqe))) {
  688. trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
  689. WRITE_ONCE(cqe->user_data, user_data);
  690. WRITE_ONCE(cqe->res, res);
  691. WRITE_ONCE(cqe->flags, cflags);
  692. if (ctx->flags & IORING_SETUP_CQE32) {
  693. WRITE_ONCE(cqe->big_cqe[0], 0);
  694. WRITE_ONCE(cqe->big_cqe[1], 0);
  695. }
  696. return true;
  697. }
  698. return false;
  699. }
  700. static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
  701. u32 cflags)
  702. {
  703. bool filled;
  704. filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
  705. if (!filled)
  706. filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
  707. return filled;
  708. }
  709. bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
  710. {
  711. bool filled;
  712. io_cq_lock(ctx);
  713. filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
  714. io_cq_unlock_post(ctx);
  715. return filled;
  716. }
  717. /*
  718. * Must be called from inline task_work so we know a flush will happen later,
  719. * and obviously with ctx->uring_lock held (tw always has that).
  720. */
  721. void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
  722. {
  723. if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
  724. spin_lock(&ctx->completion_lock);
  725. io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
  726. spin_unlock(&ctx->completion_lock);
  727. }
  728. ctx->submit_state.cq_flush = true;
  729. }
  730. /*
  731. * A helper for multishot requests posting additional CQEs.
  732. * Should only be used from a task_work including IO_URING_F_MULTISHOT.
  733. */
  734. bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
  735. {
  736. struct io_ring_ctx *ctx = req->ctx;
  737. bool posted;
  738. /*
  739. * If multishot has already posted deferred completions, ensure that
  740. * those are flushed first before posting this one. If not, CQEs
  741. * could get reordered.
  742. */
  743. if (!wq_list_empty(&ctx->submit_state.compl_reqs))
  744. __io_submit_flush_completions(ctx);
  745. lockdep_assert(!io_wq_current_is_worker());
  746. lockdep_assert_held(&ctx->uring_lock);
  747. if (!ctx->lockless_cq) {
  748. spin_lock(&ctx->completion_lock);
  749. posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
  750. spin_unlock(&ctx->completion_lock);
  751. } else {
  752. posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
  753. }
  754. ctx->submit_state.cq_flush = true;
  755. return posted;
  756. }
  757. static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
  758. {
  759. struct io_ring_ctx *ctx = req->ctx;
  760. /*
  761. * All execution paths but io-wq use the deferred completions by
  762. * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here.
  763. */
  764. if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
  765. return;
  766. /*
  767. * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
  768. * the submitter task context, IOPOLL protects with uring_lock.
  769. */
  770. if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
  771. req->io_task_work.func = io_req_task_complete;
  772. io_req_task_work_add(req);
  773. return;
  774. }
  775. io_cq_lock(ctx);
  776. if (!(req->flags & REQ_F_CQE_SKIP)) {
  777. if (!io_fill_cqe_req(ctx, req))
  778. io_req_cqe_overflow(req);
  779. }
  780. io_cq_unlock_post(ctx);
  781. /*
  782. * We don't free the request here because we know it's called from
  783. * io-wq only, which holds a reference, so it cannot be the last put.
  784. */
  785. req_ref_put(req);
  786. }
  787. void io_req_defer_failed(struct io_kiocb *req, s32 res)
  788. __must_hold(&ctx->uring_lock)
  789. {
  790. const struct io_cold_def *def = &io_cold_defs[req->opcode];
  791. lockdep_assert_held(&req->ctx->uring_lock);
  792. req_set_fail(req);
  793. io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED));
  794. if (def->fail)
  795. def->fail(req);
  796. io_req_complete_defer(req);
  797. }
  798. /*
  799. * Don't initialise the fields below on every allocation, but do that in
  800. * advance and keep them valid across allocations.
  801. */
  802. static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
  803. {
  804. req->ctx = ctx;
  805. req->link = NULL;
  806. req->async_data = NULL;
  807. /* not necessary, but safer to zero */
  808. memset(&req->cqe, 0, sizeof(req->cqe));
  809. memset(&req->big_cqe, 0, sizeof(req->big_cqe));
  810. }
  811. /*
  812. * A request might get retired back into the request caches even before opcode
  813. * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
  814. * Because of that, io_alloc_req() should be called only under ->uring_lock
  815. * and with extra caution to not get a request that is still worked on.
  816. */
  817. __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
  818. __must_hold(&ctx->uring_lock)
  819. {
  820. gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
  821. void *reqs[IO_REQ_ALLOC_BATCH];
  822. int ret;
  823. ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
  824. /*
  825. * Bulk alloc is all-or-nothing. If we fail to get a batch,
  826. * retry single alloc to be on the safe side.
  827. */
  828. if (unlikely(ret <= 0)) {
  829. reqs[0] = kmem_cache_alloc(req_cachep, gfp);
  830. if (!reqs[0])
  831. return false;
  832. ret = 1;
  833. }
  834. percpu_ref_get_many(&ctx->refs, ret);
  835. while (ret--) {
  836. struct io_kiocb *req = reqs[ret];
  837. io_preinit_req(req, ctx);
  838. io_req_add_to_cache(req, ctx);
  839. }
  840. return true;
  841. }
  842. __cold void io_free_req(struct io_kiocb *req)
  843. {
  844. /* refs were already put, restore them for io_req_task_complete() */
  845. req->flags &= ~REQ_F_REFCOUNT;
  846. /* we only want to free it, don't post CQEs */
  847. req->flags |= REQ_F_CQE_SKIP;
  848. req->io_task_work.func = io_req_task_complete;
  849. io_req_task_work_add(req);
  850. }
  851. static void __io_req_find_next_prep(struct io_kiocb *req)
  852. {
  853. struct io_ring_ctx *ctx = req->ctx;
  854. spin_lock(&ctx->completion_lock);
  855. io_disarm_next(req);
  856. spin_unlock(&ctx->completion_lock);
  857. }
  858. static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
  859. {
  860. struct io_kiocb *nxt;
  861. /*
  862. * If LINK is set, we have dependent requests in this chain. If we
  863. * didn't fail this request, queue the first one up, moving any other
  864. * dependencies to the next request. In case of failure, fail the rest
  865. * of the chain.
  866. */
  867. if (unlikely(req->flags & IO_DISARM_MASK))
  868. __io_req_find_next_prep(req);
  869. nxt = req->link;
  870. req->link = NULL;
  871. return nxt;
  872. }
  873. static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
  874. {
  875. if (!ctx)
  876. return;
  877. if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
  878. atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
  879. io_submit_flush_completions(ctx);
  880. mutex_unlock(&ctx->uring_lock);
  881. percpu_ref_put(&ctx->refs);
  882. }
  883. /*
  884. * Run queued task_work, returning the number of entries processed in *count.
  885. * If more entries than max_entries are available, stop processing once this
  886. * is reached and return the rest of the list.
  887. */
  888. struct llist_node *io_handle_tw_list(struct llist_node *node,
  889. unsigned int *count,
  890. unsigned int max_entries)
  891. {
  892. struct io_ring_ctx *ctx = NULL;
  893. struct io_tw_state ts = { };
  894. do {
  895. struct llist_node *next = node->next;
  896. struct io_kiocb *req = container_of(node, struct io_kiocb,
  897. io_task_work.node);
  898. if (req->ctx != ctx) {
  899. ctx_flush_and_put(ctx, &ts);
  900. ctx = req->ctx;
  901. mutex_lock(&ctx->uring_lock);
  902. percpu_ref_get(&ctx->refs);
  903. }
  904. INDIRECT_CALL_2(req->io_task_work.func,
  905. io_poll_task_func, io_req_rw_complete,
  906. req, &ts);
  907. node = next;
  908. (*count)++;
  909. if (unlikely(need_resched())) {
  910. ctx_flush_and_put(ctx, &ts);
  911. ctx = NULL;
  912. cond_resched();
  913. }
  914. } while (node && *count < max_entries);
  915. ctx_flush_and_put(ctx, &ts);
  916. return node;
  917. }
  918. /**
  919. * io_llist_xchg - swap all entries in a lock-less list
  920. * @head: the head of lock-less list to delete all entries
  921. * @new: new entry as the head of the list
  922. *
  923. * If the list is empty, return NULL; otherwise, return the pointer to the first entry.
  924. * The order of entries returned is from the newest to the oldest added one.
  925. */
  926. static inline struct llist_node *io_llist_xchg(struct llist_head *head,
  927. struct llist_node *new)
  928. {
  929. return xchg(&head->first, new);
  930. }
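/*
 * Typical consumption pattern for the helper above (illustrative sketch,
 * mirroring what __io_run_local_work() does below): detach the whole list
 * atomically, restore submission order, then walk it.
 *
 *	struct llist_node *node = io_llist_xchg(&ctx->work_llist, NULL);
 *
 *	node = llist_reverse_order(node);
 *	while (node) {
 *		struct llist_node *next = node->next;
 *
 *		(process the request embedding @node here)
 *		node = next;
 *	}
 */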
  931. static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
  932. {
  933. struct llist_node *node = llist_del_all(&tctx->task_list);
  934. struct io_ring_ctx *last_ctx = NULL;
  935. struct io_kiocb *req;
  936. while (node) {
  937. req = container_of(node, struct io_kiocb, io_task_work.node);
  938. node = node->next;
  939. if (last_ctx != req->ctx) {
  940. if (last_ctx) {
  941. if (sync)
  942. flush_delayed_work(&last_ctx->fallback_work);
  943. percpu_ref_put(&last_ctx->refs);
  944. }
  945. last_ctx = req->ctx;
  946. percpu_ref_get(&last_ctx->refs);
  947. }
  948. if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
  949. schedule_delayed_work(&last_ctx->fallback_work, 1);
  950. }
  951. if (last_ctx) {
  952. if (sync)
  953. flush_delayed_work(&last_ctx->fallback_work);
  954. percpu_ref_put(&last_ctx->refs);
  955. }
  956. }
  957. struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
  958. unsigned int max_entries,
  959. unsigned int *count)
  960. {
  961. struct llist_node *node;
  962. if (unlikely(current->flags & PF_EXITING)) {
  963. io_fallback_tw(tctx, true);
  964. return NULL;
  965. }
  966. node = llist_del_all(&tctx->task_list);
  967. if (node) {
  968. node = llist_reverse_order(node);
  969. node = io_handle_tw_list(node, count, max_entries);
  970. }
  971. /* relaxed read is enough as only the task itself sets ->in_cancel */
  972. if (unlikely(atomic_read(&tctx->in_cancel)))
  973. io_uring_drop_tctx_refs(current);
  974. trace_io_uring_task_work_run(tctx, *count);
  975. return node;
  976. }
  977. void tctx_task_work(struct callback_head *cb)
  978. {
  979. struct io_uring_task *tctx;
  980. struct llist_node *ret;
  981. unsigned int count = 0;
  982. tctx = container_of(cb, struct io_uring_task, task_work);
  983. ret = tctx_task_work_run(tctx, UINT_MAX, &count);
  984. /* can't happen */
  985. WARN_ON_ONCE(ret);
  986. }
  987. static inline void io_req_local_work_add(struct io_kiocb *req,
  988. struct io_ring_ctx *ctx,
  989. unsigned flags)
  990. {
  991. unsigned nr_wait, nr_tw, nr_tw_prev;
  992. struct llist_node *head;
  993. /* See comment above IO_CQ_WAKE_INIT */
  994. BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);
  995. /*
  996. * We don't know how many requests there are in the link and whether
  997. * they can even be queued lazily, fall back to non-lazy.
  998. */
  999. if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
  1000. flags &= ~IOU_F_TWQ_LAZY_WAKE;
  1001. guard(rcu)();
  1002. head = READ_ONCE(ctx->work_llist.first);
  1003. do {
  1004. nr_tw_prev = 0;
  1005. if (head) {
  1006. struct io_kiocb *first_req = container_of(head,
  1007. struct io_kiocb,
  1008. io_task_work.node);
  1009. /*
  1010. * Might be executed at any moment, rely on
  1011. * SLAB_TYPESAFE_BY_RCU to keep it alive.
  1012. */
  1013. nr_tw_prev = READ_ONCE(first_req->nr_tw);
  1014. }
  1015. /*
  1016. * Theoretically, it can overflow, but that's fine as one of
  1017. * previous adds should've tried to wake the task.
  1018. */
  1019. nr_tw = nr_tw_prev + 1;
  1020. if (!(flags & IOU_F_TWQ_LAZY_WAKE))
  1021. nr_tw = IO_CQ_WAKE_FORCE;
  1022. req->nr_tw = nr_tw;
  1023. req->io_task_work.node.next = head;
  1024. } while (!try_cmpxchg(&ctx->work_llist.first, &head,
  1025. &req->io_task_work.node));
  1026. /*
  1027. * cmpxchg implies a full barrier, which pairs with the barrier
  1028. * in set_current_state() on the io_cqring_wait() side. It's used
  1029. * to ensure that either we see updated ->cq_wait_nr, or waiters
  1030. * going to sleep will observe the work added to the list, which
  1031. * is similar to the wait/wake task state sync.
  1032. */
  1033. if (!head) {
  1034. if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
  1035. atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
  1036. if (ctx->has_evfd)
  1037. io_eventfd_signal(ctx);
  1038. }
  1039. nr_wait = atomic_read(&ctx->cq_wait_nr);
  1040. /* not enough or no one is waiting */
  1041. if (nr_tw < nr_wait)
  1042. return;
  1043. /* the previous add has already woken it up */
  1044. if (nr_tw_prev >= nr_wait)
  1045. return;
  1046. wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
  1047. }
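/*
 * Lazy wake example for the function above (illustrative): suppose a waiter
 * in io_cqring_wait() asked for 4 completions, so ->cq_wait_nr == 4. Three
 * IOU_F_TWQ_LAZY_WAKE adds set nr_tw to 1, 2 and 3, and each returns early
 * because nr_tw < nr_wait. The fourth add computes nr_tw == 4 and, since the
 * previous add only reached nr_tw_prev == 3 < 4, it issues the single
 * wake_up_state(). A non-lazy add at any point sets nr_tw to
 * IO_CQ_WAKE_FORCE instead and wakes immediately.
 */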
  1048. static void io_req_normal_work_add(struct io_kiocb *req)
  1049. {
  1050. struct io_uring_task *tctx = req->task->io_uring;
  1051. struct io_ring_ctx *ctx = req->ctx;
  1052. /* task_work already pending, we're done */
  1053. if (!llist_add(&req->io_task_work.node, &tctx->task_list))
  1054. return;
  1055. if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
  1056. atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
  1057. /* SQPOLL doesn't need the task_work added, it'll run it itself */
  1058. if (ctx->flags & IORING_SETUP_SQPOLL) {
  1059. __set_notify_signal(req->task);
  1060. return;
  1061. }
  1062. if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
  1063. return;
  1064. io_fallback_tw(tctx, false);
  1065. }
  1066. void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
  1067. {
  1068. if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
  1069. io_req_local_work_add(req, req->ctx, flags);
  1070. else
  1071. io_req_normal_work_add(req);
  1072. }
  1073. void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
  1074. unsigned flags)
  1075. {
  1076. if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
  1077. return;
  1078. io_req_local_work_add(req, ctx, flags);
  1079. }
  1080. static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
  1081. {
  1082. struct llist_node *node;
  1083. node = llist_del_all(&ctx->work_llist);
  1084. while (node) {
  1085. struct io_kiocb *req = container_of(node, struct io_kiocb,
  1086. io_task_work.node);
  1087. node = node->next;
  1088. io_req_normal_work_add(req);
  1089. }
  1090. }
  1091. static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
  1092. int min_events)
  1093. {
  1094. if (llist_empty(&ctx->work_llist))
  1095. return false;
  1096. if (events < min_events)
  1097. return true;
  1098. if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
  1099. atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
  1100. return false;
  1101. }
  1102. static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
  1103. int min_events)
  1104. {
  1105. struct llist_node *node;
  1106. unsigned int loops = 0;
  1107. int ret = 0;
  1108. if (WARN_ON_ONCE(ctx->submitter_task != current))
  1109. return -EEXIST;
  1110. if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
  1111. atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
  1112. again:
  1113. /*
  1114. * the llist is in reverse order; flip it back the right way before
  1115. * running the pending items.
  1116. */
  1117. node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL));
  1118. while (node) {
  1119. struct llist_node *next = node->next;
  1120. struct io_kiocb *req = container_of(node, struct io_kiocb,
  1121. io_task_work.node);
  1122. INDIRECT_CALL_2(req->io_task_work.func,
  1123. io_poll_task_func, io_req_rw_complete,
  1124. req, ts);
  1125. ret++;
  1126. node = next;
  1127. }
  1128. loops++;
  1129. if (io_run_local_work_continue(ctx, ret, min_events))
  1130. goto again;
  1131. io_submit_flush_completions(ctx);
  1132. if (io_run_local_work_continue(ctx, ret, min_events))
  1133. goto again;
  1134. trace_io_uring_local_work_run(ctx, ret, loops);
  1135. return ret;
  1136. }
  1137. static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
  1138. int min_events)
  1139. {
  1140. struct io_tw_state ts = {};
  1141. if (llist_empty(&ctx->work_llist))
  1142. return 0;
  1143. return __io_run_local_work(ctx, &ts, min_events);
  1144. }
  1145. static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
  1146. {
  1147. struct io_tw_state ts = {};
  1148. int ret;
  1149. mutex_lock(&ctx->uring_lock);
  1150. ret = __io_run_local_work(ctx, &ts, min_events);
  1151. mutex_unlock(&ctx->uring_lock);
  1152. return ret;
  1153. }
  1154. static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
  1155. {
  1156. io_tw_lock(req->ctx, ts);
  1157. io_req_defer_failed(req, req->cqe.res);
  1158. }
  1159. void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
  1160. {
  1161. struct io_ring_ctx *ctx = req->ctx;
  1162. io_tw_lock(ctx, ts);
  1163. if (unlikely(io_should_terminate_tw(ctx)))
  1164. io_req_defer_failed(req, -EFAULT);
  1165. else if (req->flags & REQ_F_FORCE_ASYNC)
  1166. io_queue_iowq(req);
  1167. else
  1168. io_queue_sqe(req);
  1169. }
  1170. void io_req_task_queue_fail(struct io_kiocb *req, int ret)
  1171. {
  1172. io_req_set_res(req, ret, 0);
  1173. req->io_task_work.func = io_req_task_cancel;
  1174. io_req_task_work_add(req);
  1175. }
  1176. void io_req_task_queue(struct io_kiocb *req)
  1177. {
  1178. req->io_task_work.func = io_req_task_submit;
  1179. io_req_task_work_add(req);
  1180. }
  1181. void io_queue_next(struct io_kiocb *req)
  1182. {
  1183. struct io_kiocb *nxt = io_req_find_next(req);
  1184. if (nxt)
  1185. io_req_task_queue(nxt);
  1186. }
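/*
 * Tear down a batch of completed requests. Slow-path flags are handled
 * first: drop explicit refcounts, recycle or free async poll state, queue
 * any linked request and run opcode-specific cleanup. Then release the
 * file, rsrc node and task references and return the request to the
 * allocation cache.
 */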
  1187. static void io_free_batch_list(struct io_ring_ctx *ctx,
  1188. struct io_wq_work_node *node)
  1189. __must_hold(&ctx->uring_lock)
  1190. {
  1191. do {
  1192. struct io_kiocb *req = container_of(node, struct io_kiocb,
  1193. comp_list);
  1194. if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
  1195. if (req->flags & REQ_F_REFCOUNT) {
  1196. node = req->comp_list.next;
  1197. if (!req_ref_put_and_test(req))
  1198. continue;
  1199. }
  1200. if ((req->flags & REQ_F_POLLED) && req->apoll) {
  1201. struct async_poll *apoll = req->apoll;
  1202. if (apoll->double_poll)
  1203. kfree(apoll->double_poll);
  1204. if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
  1205. kfree(apoll);
  1206. req->flags &= ~REQ_F_POLLED;
  1207. }
  1208. if (req->flags & IO_REQ_LINK_FLAGS)
  1209. io_queue_next(req);
  1210. if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
  1211. io_clean_op(req);
  1212. }
  1213. io_put_file(req);
  1214. io_put_rsrc_node(ctx, req->rsrc_node);
  1215. io_put_task(req->task);
  1216. node = req->comp_list.next;
  1217. io_req_add_to_cache(req, ctx);
  1218. } while (node);
  1219. }
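/*
 * Post CQEs for the requests batched in submit_state->compl_reqs (unless
 * REQ_F_CQE_SKIP is set). If the CQ ring is full, the CQE goes to the
 * overflow list instead, taking completion_lock explicitly for
 * lockless-CQ rings. Afterwards the batch is freed via
 * io_free_batch_list().
 */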
  1220. void __io_submit_flush_completions(struct io_ring_ctx *ctx)
  1221. __must_hold(&ctx->uring_lock)
  1222. {
  1223. struct io_submit_state *state = &ctx->submit_state;
  1224. struct io_wq_work_node *node;
  1225. __io_cq_lock(ctx);
  1226. __wq_list_for_each(node, &state->compl_reqs) {
  1227. struct io_kiocb *req = container_of(node, struct io_kiocb,
  1228. comp_list);
  1229. if (!(req->flags & REQ_F_CQE_SKIP) &&
  1230. unlikely(!io_fill_cqe_req(ctx, req))) {
  1231. if (ctx->lockless_cq) {
  1232. spin_lock(&ctx->completion_lock);
  1233. io_req_cqe_overflow(req);
  1234. spin_unlock(&ctx->completion_lock);
  1235. } else {
  1236. io_req_cqe_overflow(req);
  1237. }
  1238. }
  1239. }
  1240. __io_cq_unlock_post(ctx);
  1241. if (!wq_list_empty(&state->compl_reqs)) {
  1242. io_free_batch_list(ctx, state->compl_reqs.first);
  1243. INIT_WQ_LIST(&state->compl_reqs);
  1244. }
  1245. ctx->submit_state.cq_flush = false;
  1246. }
  1247. static unsigned io_cqring_events(struct io_ring_ctx *ctx)
  1248. {
  1249. /* See comment at the top of this file */
  1250. smp_rmb();
  1251. return __io_cqring_events(ctx);
  1252. }
  1253. /*
  1254. * We can't just wait for polled events to come to us, we have to actively
  1255. * find and complete them.
  1256. */
  1257. static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
  1258. {
  1259. if (!(ctx->flags & IORING_SETUP_IOPOLL))
  1260. return;
  1261. mutex_lock(&ctx->uring_lock);
  1262. while (!wq_list_empty(&ctx->iopoll_list)) {
  1263. /* let it sleep and repeat later if can't complete a request */
  1264. if (io_do_iopoll(ctx, true) == 0)
  1265. break;
/*
 * Ensure we allow local-to-the-cpu processing to take place; in this
 * case we need to ensure that we reap all events.
 * Also let task_work, etc. make progress by releasing the mutex.
 */
  1271. if (need_resched()) {
  1272. mutex_unlock(&ctx->uring_lock);
  1273. cond_resched();
  1274. mutex_lock(&ctx->uring_lock);
  1275. }
  1276. }
  1277. mutex_unlock(&ctx->uring_lock);
  1278. }
  1279. static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
  1280. {
  1281. unsigned int nr_events = 0;
  1282. unsigned long check_cq;
  1283. lockdep_assert_held(&ctx->uring_lock);
  1284. if (!io_allowed_run_tw(ctx))
  1285. return -EEXIST;
  1286. check_cq = READ_ONCE(ctx->check_cq);
  1287. if (unlikely(check_cq)) {
  1288. if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
  1289. __io_cqring_overflow_flush(ctx, false);
  1290. /*
  1291. * Similarly do not spin if we have not informed the user of any
  1292. * dropped CQE.
  1293. */
  1294. if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
  1295. return -EBADR;
  1296. }
  1297. /*
  1298. * Don't enter poll loop if we already have events pending.
  1299. * If we do, we can potentially be spinning for commands that
  1300. * already triggered a CQE (eg in error).
  1301. */
  1302. if (io_cqring_events(ctx))
  1303. return 0;
  1304. do {
  1305. int ret = 0;
  1306. /*
  1307. * If a submit got punted to a workqueue, we can have the
  1308. * application entering polling for a command before it gets
  1309. * issued. That app will hold the uring_lock for the duration
  1310. * of the poll right here, so we need to take a breather every
  1311. * now and then to ensure that the issue has a chance to add
  1312. * the poll to the issued list. Otherwise we can spin here
  1313. * forever, while the workqueue is stuck trying to acquire the
  1314. * very same mutex.
  1315. */
  1316. if (wq_list_empty(&ctx->iopoll_list) ||
  1317. io_task_work_pending(ctx)) {
  1318. u32 tail = ctx->cached_cq_tail;
  1319. (void) io_run_local_work_locked(ctx, min);
  1320. if (task_work_pending(current) ||
  1321. wq_list_empty(&ctx->iopoll_list)) {
  1322. mutex_unlock(&ctx->uring_lock);
  1323. io_run_task_work();
  1324. mutex_lock(&ctx->uring_lock);
  1325. }
  1326. /* some requests don't go through iopoll_list */
  1327. if (tail != ctx->cached_cq_tail ||
  1328. wq_list_empty(&ctx->iopoll_list))
  1329. break;
  1330. }
  1331. ret = io_do_iopoll(ctx, !min);
  1332. if (unlikely(ret < 0))
  1333. return ret;
  1334. if (task_sigpending(current))
  1335. return -EINTR;
  1336. if (need_resched())
  1337. break;
  1338. nr_events += ret;
  1339. } while (nr_events < min);
  1340. return 0;
  1341. }
  1342. void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
  1343. {
  1344. io_req_complete_defer(req);
  1345. }
  1346. /*
  1347. * After the iocb has been issued, it's safe to be found on the poll list.
  1348. * Adding the kiocb to the list AFTER submission ensures that we don't
  1349. * find it from a io_do_iopoll() thread before the issuer is done
  1350. * accessing the kiocb cookie.
  1351. */
  1352. static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
  1353. {
  1354. struct io_ring_ctx *ctx = req->ctx;
  1355. const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
  1356. /* workqueue context doesn't hold uring_lock, grab it now */
  1357. if (unlikely(needs_lock))
  1358. mutex_lock(&ctx->uring_lock);
  1359. /*
  1360. * Track whether we have multiple files in our lists. This will impact
  1361. * how we do polling eventually, not spinning if we're on potentially
  1362. * different devices.
  1363. */
  1364. if (wq_list_empty(&ctx->iopoll_list)) {
  1365. ctx->poll_multi_queue = false;
  1366. } else if (!ctx->poll_multi_queue) {
  1367. struct io_kiocb *list_req;
  1368. list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
  1369. comp_list);
  1370. if (list_req->file != req->file)
  1371. ctx->poll_multi_queue = true;
  1372. }
  1373. /*
  1374. * For fast devices, IO may have already completed. If it has, add
  1375. * it to the front so we find it first.
  1376. */
  1377. if (READ_ONCE(req->iopoll_completed))
  1378. wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
  1379. else
  1380. wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
  1381. if (unlikely(needs_lock)) {
/*
 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in the
 * sq thread task context or in an io worker task context. If the
 * current task context is the sq thread, we don't need to check
 * whether we should wake up the sq thread.
 */
  1388. if ((ctx->flags & IORING_SETUP_SQPOLL) &&
  1389. wq_has_sleeper(&ctx->sq_data->wait))
  1390. wake_up(&ctx->sq_data->wait);
  1391. mutex_unlock(&ctx->uring_lock);
  1392. }
  1393. }
  1394. io_req_flags_t io_file_get_flags(struct file *file)
  1395. {
  1396. io_req_flags_t res = 0;
  1397. if (S_ISREG(file_inode(file)->i_mode))
  1398. res |= REQ_F_ISREG;
  1399. if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
  1400. res |= REQ_F_SUPPORT_NOWAIT;
  1401. return res;
  1402. }
  1403. bool io_alloc_async_data(struct io_kiocb *req)
  1404. {
  1405. const struct io_issue_def *def = &io_issue_defs[req->opcode];
  1406. WARN_ON_ONCE(!def->async_size);
  1407. req->async_data = kmalloc(def->async_size, GFP_KERNEL);
  1408. if (req->async_data) {
  1409. req->flags |= REQ_F_ASYNC_DATA;
  1410. return false;
  1411. }
  1412. return true;
  1413. }
  1414. static u32 io_get_sequence(struct io_kiocb *req)
  1415. {
  1416. u32 seq = req->ctx->cached_sq_head;
  1417. struct io_kiocb *cur;
  1418. /* need original cached_sq_head, but it was increased for each req */
  1419. io_for_each_link(cur, req)
  1420. seq--;
  1421. return seq;
  1422. }
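/*
 * Handle IOSQE_IO_DRAIN. If nothing is pending that the request must wait
 * for, clear drain_active and queue it normally. Otherwise allocate an
 * io_defer_entry and park the request on ctx->defer_list, where it waits
 * until earlier requests have completed.
 */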
  1423. static __cold void io_drain_req(struct io_kiocb *req)
  1424. __must_hold(&ctx->uring_lock)
  1425. {
  1426. struct io_ring_ctx *ctx = req->ctx;
  1427. struct io_defer_entry *de;
  1428. int ret;
  1429. u32 seq = io_get_sequence(req);
  1430. /* Still need defer if there is pending req in defer list. */
  1431. spin_lock(&ctx->completion_lock);
  1432. if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
  1433. spin_unlock(&ctx->completion_lock);
  1434. queue:
  1435. ctx->drain_active = false;
  1436. io_req_task_queue(req);
  1437. return;
  1438. }
  1439. spin_unlock(&ctx->completion_lock);
  1440. io_prep_async_link(req);
  1441. de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT);
  1442. if (!de) {
  1443. ret = -ENOMEM;
  1444. io_req_defer_failed(req, ret);
  1445. return;
  1446. }
  1447. spin_lock(&ctx->completion_lock);
  1448. if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
  1449. spin_unlock(&ctx->completion_lock);
  1450. kfree(de);
  1451. goto queue;
  1452. }
  1453. trace_io_uring_defer(req);
  1454. de->req = req;
  1455. de->seq = seq;
  1456. list_add_tail(&de->list, &ctx->defer_list);
  1457. spin_unlock(&ctx->completion_lock);
  1458. }
  1459. static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
  1460. unsigned int issue_flags)
  1461. {
  1462. if (req->file || !def->needs_file)
  1463. return true;
  1464. if (req->flags & REQ_F_FIXED_FILE)
  1465. req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
  1466. else
  1467. req->file = io_file_get_normal(req, req->cqe.fd);
  1468. return !!req->file;
  1469. }
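/*
 * Core issue path: resolve the file if needed, temporarily override
 * credentials and prepare a linked timeout for the slow-path flags, then
 * call the opcode's ->issue() handler (bracketed by audit hooks). IOU_OK
 * results are completed here, either deferred or posted immediately;
 * IOU_ISSUE_SKIP_COMPLETE hands IOPOLL requests to the iopoll list.
 */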
  1470. #define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
  1471. static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
  1472. {
  1473. const struct io_issue_def *def = &io_issue_defs[req->opcode];
  1474. const struct cred *creds = NULL;
  1475. struct io_kiocb *link = NULL;
  1476. int ret;
  1477. if (unlikely(!io_assign_file(req, def, issue_flags)))
  1478. return -EBADF;
  1479. if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
  1480. if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
  1481. creds = override_creds(req->creds);
  1482. if (req->flags & REQ_F_ARM_LTIMEOUT)
  1483. link = __io_prep_linked_timeout(req);
  1484. }
  1485. if (!def->audit_skip)
  1486. audit_uring_entry(req->opcode);
  1487. ret = def->issue(req, issue_flags);
  1488. if (!def->audit_skip)
  1489. audit_uring_exit(!ret, ret);
  1490. if (unlikely(creds || link)) {
  1491. if (creds)
  1492. revert_creds(creds);
  1493. if (link)
  1494. io_queue_linked_timeout(link);
  1495. }
  1496. if (ret == IOU_OK) {
  1497. if (issue_flags & IO_URING_F_COMPLETE_DEFER)
  1498. io_req_complete_defer(req);
  1499. else
  1500. io_req_complete_post(req, issue_flags);
  1501. return 0;
  1502. }
  1503. if (ret == IOU_ISSUE_SKIP_COMPLETE) {
  1504. ret = 0;
  1505. /* If the op doesn't have a file, we're not polling for it */
  1506. if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
  1507. io_iopoll_req_issued(req, issue_flags);
  1508. }
  1509. return ret;
  1510. }
  1511. int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
  1512. {
  1513. io_tw_lock(req->ctx, ts);
  1514. return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
  1515. IO_URING_F_COMPLETE_DEFER);
  1516. }
  1517. struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
  1518. {
  1519. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  1520. struct io_kiocb *nxt = NULL;
  1521. if (req_ref_put_and_test_atomic(req)) {
  1522. if (req->flags & IO_REQ_LINK_FLAGS)
  1523. nxt = io_req_find_next(req);
  1524. io_free_req(req);
  1525. }
  1526. return nxt ? &nxt->work : NULL;
  1527. }
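/*
 * Entry point for requests punted to io-wq. The worker takes an extra
 * reference, refuses multishot execution (or arms poll for pollable files
 * instead), and then issues the request, retrying on -EAGAIN either via
 * async poll for pollable force-async requests or by re-issuing for
 * IOPOLL rings. Failures are reported from task_work context to avoid
 * locking problems.
 */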
  1528. void io_wq_submit_work(struct io_wq_work *work)
  1529. {
  1530. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  1531. const struct io_issue_def *def = &io_issue_defs[req->opcode];
  1532. unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
  1533. bool needs_poll = false;
  1534. int ret = 0, err = -ECANCELED;
  1535. /* one will be dropped by ->io_wq_free_work() after returning to io-wq */
  1536. if (!(req->flags & REQ_F_REFCOUNT))
  1537. __io_req_set_refcount(req, 2);
  1538. else
  1539. req_ref_get(req);
  1540. /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
  1541. if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
  1542. fail:
  1543. io_req_task_queue_fail(req, err);
  1544. return;
  1545. }
  1546. if (!io_assign_file(req, def, issue_flags)) {
  1547. err = -EBADF;
  1548. atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
  1549. goto fail;
  1550. }
/*
 * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
 * submitter task context. Final request completions are handed to the
 * right context; however, this is not the case for auxiliary CQEs,
 * which are the main means of operation for multishot requests.
 * Don't allow any multishot execution from io-wq. It's more restrictive
 * than necessary and also cleaner.
 */
  1559. if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) {
  1560. err = -EBADFD;
  1561. if (!io_file_can_poll(req))
  1562. goto fail;
  1563. if (req->file->f_flags & O_NONBLOCK ||
  1564. req->file->f_mode & FMODE_NOWAIT) {
  1565. err = -ECANCELED;
  1566. if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
  1567. goto fail;
  1568. return;
  1569. } else {
  1570. req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT);
  1571. }
  1572. }
  1573. if (req->flags & REQ_F_FORCE_ASYNC) {
  1574. bool opcode_poll = def->pollin || def->pollout;
  1575. if (opcode_poll && io_file_can_poll(req)) {
  1576. needs_poll = true;
  1577. issue_flags |= IO_URING_F_NONBLOCK;
  1578. }
  1579. }
  1580. do {
  1581. ret = io_issue_sqe(req, issue_flags);
  1582. if (ret != -EAGAIN)
  1583. break;
  1584. /*
  1585. * If REQ_F_NOWAIT is set, then don't wait or retry with
  1586. * poll. -EAGAIN is final for that case.
  1587. */
  1588. if (req->flags & REQ_F_NOWAIT)
  1589. break;
  1590. /*
  1591. * We can get EAGAIN for iopolled IO even though we're
  1592. * forcing a sync submission from here, since we can't
  1593. * wait for request slots on the block side.
  1594. */
  1595. if (!needs_poll) {
  1596. if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
  1597. break;
  1598. if (io_wq_worker_stopped())
  1599. break;
  1600. cond_resched();
  1601. continue;
  1602. }
  1603. if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
  1604. return;
  1605. /* aborted or ready, in either case retry blocking */
  1606. needs_poll = false;
  1607. issue_flags &= ~IO_URING_F_NONBLOCK;
  1608. } while (1);
  1609. /* avoid locking problems by failing it from a clean context */
  1610. if (ret)
  1611. io_req_task_queue_fail(req, ret);
  1612. }
  1613. inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
  1614. unsigned int issue_flags)
  1615. {
  1616. struct io_ring_ctx *ctx = req->ctx;
  1617. struct io_fixed_file *slot;
  1618. struct file *file = NULL;
  1619. io_ring_submit_lock(ctx, issue_flags);
  1620. if (unlikely((unsigned int)fd >= ctx->nr_user_files))
  1621. goto out;
  1622. fd = array_index_nospec(fd, ctx->nr_user_files);
  1623. slot = io_fixed_file_slot(&ctx->file_table, fd);
  1624. if (!req->rsrc_node)
  1625. __io_req_set_rsrc_node(req, ctx);
  1626. req->flags |= io_slot_flags(slot);
  1627. file = io_slot_file(slot);
  1628. out:
  1629. io_ring_submit_unlock(ctx, issue_flags);
  1630. return file;
  1631. }
  1632. struct file *io_file_get_normal(struct io_kiocb *req, int fd)
  1633. {
  1634. struct file *file = fget(fd);
  1635. trace_io_uring_file_get(req, fd);
  1636. /* we don't allow fixed io_uring files */
  1637. if (file && io_is_uring_fops(file))
  1638. io_req_track_inflight(req);
  1639. return file;
  1640. }
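/*
 * Called when a non-blocking issue attempt did not complete. Anything
 * other than -EAGAIN (or -EAGAIN with REQ_F_NOWAIT) fails the request.
 * Otherwise try to arm async poll: if the file is already ready, requeue
 * the request via task_work; if the poll attempt is aborted, punt the
 * request to io-wq.
 */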
  1641. static void io_queue_async(struct io_kiocb *req, int ret)
  1642. __must_hold(&req->ctx->uring_lock)
  1643. {
  1644. if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
  1645. io_req_defer_failed(req, ret);
  1646. return;
  1647. }
  1648. switch (io_arm_poll_handler(req, 0)) {
  1649. case IO_APOLL_READY:
  1650. io_kbuf_recycle(req, 0);
  1651. io_req_task_queue(req);
  1652. break;
  1653. case IO_APOLL_ABORTED:
  1654. io_kbuf_recycle(req, 0);
  1655. io_queue_iowq(req);
  1656. break;
  1657. case IO_APOLL_OK:
  1658. break;
  1659. }
  1660. }
  1661. static inline void io_queue_sqe(struct io_kiocb *req)
  1662. __must_hold(&req->ctx->uring_lock)
  1663. {
  1664. int ret;
  1665. ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
  1666. /*
  1667. * We async punt it if the file wasn't marked NOWAIT, or if the file
  1668. * doesn't support non-blocking read/write attempts
  1669. */
  1670. if (unlikely(ret))
  1671. io_queue_async(req, ret);
  1672. }
  1673. static void io_queue_sqe_fallback(struct io_kiocb *req)
  1674. __must_hold(&req->ctx->uring_lock)
  1675. {
  1676. if (unlikely(req->flags & REQ_F_FAIL)) {
/*
 * We don't submit; fail them all. For that, replace hardlinks with
 * normal links. An extra REQ_F_LINK is tolerated.
 */
  1681. req->flags &= ~REQ_F_HARDLINK;
  1682. req->flags |= REQ_F_LINK;
  1683. io_req_defer_failed(req, req->cqe.res);
  1684. } else {
  1685. if (unlikely(req->ctx->drain_active))
  1686. io_drain_req(req);
  1687. else
  1688. io_queue_iowq(req);
  1689. }
  1690. }
  1691. /*
  1692. * Check SQE restrictions (opcode and flags).
  1693. *
  1694. * Returns 'true' if SQE is allowed, 'false' otherwise.
  1695. */
  1696. static inline bool io_check_restriction(struct io_ring_ctx *ctx,
  1697. struct io_kiocb *req,
  1698. unsigned int sqe_flags)
  1699. {
  1700. if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
  1701. return false;
  1702. if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
  1703. ctx->restrictions.sqe_flags_required)
  1704. return false;
  1705. if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
  1706. ctx->restrictions.sqe_flags_required))
  1707. return false;
  1708. return true;
  1709. }
  1710. static void io_init_req_drain(struct io_kiocb *req)
  1711. {
  1712. struct io_ring_ctx *ctx = req->ctx;
  1713. struct io_kiocb *head = ctx->submit_state.link.head;
  1714. ctx->drain_active = true;
  1715. if (head) {
  1716. /*
  1717. * If we need to drain a request in the middle of a link, drain
  1718. * the head request and the next request/link after the current
  1719. * link. Considering sequential execution of links,
  1720. * REQ_F_IO_DRAIN will be maintained for every request of our
  1721. * link.
  1722. */
  1723. head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
  1724. ctx->drain_next = true;
  1725. }
  1726. }
  1727. static __cold int io_init_fail_req(struct io_kiocb *req, int err)
  1728. {
  1729. /* ensure per-opcode data is cleared if we fail before prep */
  1730. memset(&req->cmd.data, 0, sizeof(req->cmd.data));
  1731. return err;
  1732. }
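/*
 * Initialise a request from its SQE. All SQE fields are read with
 * READ_ONCE() since the memory is shared with userspace. This validates
 * the opcode and flags against the opcode definition and any registered
 * restrictions, handles drain and link state, starts a block plug for
 * storage-heavy batches, resolves personality credentials, and finally
 * calls the opcode's ->prep() handler.
 */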
  1733. static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
  1734. const struct io_uring_sqe *sqe)
  1735. __must_hold(&ctx->uring_lock)
  1736. {
  1737. const struct io_issue_def *def;
  1738. unsigned int sqe_flags;
  1739. int personality;
  1740. u8 opcode;
  1741. /* req is partially pre-initialised, see io_preinit_req() */
  1742. req->opcode = opcode = READ_ONCE(sqe->opcode);
/* same numerical values as the corresponding REQ_F_* flags, safe to copy */
  1744. sqe_flags = READ_ONCE(sqe->flags);
  1745. req->flags = (__force io_req_flags_t) sqe_flags;
  1746. req->cqe.user_data = READ_ONCE(sqe->user_data);
  1747. req->file = NULL;
  1748. req->rsrc_node = NULL;
  1749. req->task = current;
  1750. req->cancel_seq_set = false;
  1751. if (unlikely(opcode >= IORING_OP_LAST)) {
  1752. req->opcode = 0;
  1753. return io_init_fail_req(req, -EINVAL);
  1754. }
  1755. opcode = array_index_nospec(opcode, IORING_OP_LAST);
  1756. def = &io_issue_defs[opcode];
  1757. if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
  1758. /* enforce forwards compatibility on users */
  1759. if (sqe_flags & ~SQE_VALID_FLAGS)
  1760. return io_init_fail_req(req, -EINVAL);
  1761. if (sqe_flags & IOSQE_BUFFER_SELECT) {
  1762. if (!def->buffer_select)
  1763. return io_init_fail_req(req, -EOPNOTSUPP);
  1764. req->buf_index = READ_ONCE(sqe->buf_group);
  1765. }
  1766. if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
  1767. ctx->drain_disabled = true;
  1768. if (sqe_flags & IOSQE_IO_DRAIN) {
  1769. if (ctx->drain_disabled)
  1770. return io_init_fail_req(req, -EOPNOTSUPP);
  1771. io_init_req_drain(req);
  1772. }
  1773. }
  1774. if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
  1775. if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
  1776. return io_init_fail_req(req, -EACCES);
  1777. /* knock it to the slow queue path, will be drained there */
  1778. if (ctx->drain_active)
  1779. req->flags |= REQ_F_FORCE_ASYNC;
  1780. /* if there is no link, we're at "next" request and need to drain */
  1781. if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
  1782. ctx->drain_next = false;
  1783. ctx->drain_active = true;
  1784. req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
  1785. }
  1786. }
  1787. if (!def->ioprio && sqe->ioprio)
  1788. return io_init_fail_req(req, -EINVAL);
  1789. if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
  1790. return io_init_fail_req(req, -EINVAL);
  1791. if (def->needs_file) {
  1792. struct io_submit_state *state = &ctx->submit_state;
  1793. req->cqe.fd = READ_ONCE(sqe->fd);
  1794. /*
  1795. * Plug now if we have more than 2 IO left after this, and the
  1796. * target is potentially a read/write to block based storage.
  1797. */
  1798. if (state->need_plug && def->plug) {
  1799. state->plug_started = true;
  1800. state->need_plug = false;
  1801. blk_start_plug_nr_ios(&state->plug, state->submit_nr);
  1802. }
  1803. }
  1804. personality = READ_ONCE(sqe->personality);
  1805. if (personality) {
  1806. int ret;
  1807. req->creds = xa_load(&ctx->personalities, personality);
  1808. if (!req->creds)
  1809. return io_init_fail_req(req, -EINVAL);
  1810. get_cred(req->creds);
  1811. ret = security_uring_override_creds(req->creds);
  1812. if (ret) {
  1813. put_cred(req->creds);
  1814. return io_init_fail_req(req, ret);
  1815. }
  1816. req->flags |= REQ_F_CREDS;
  1817. }
  1818. return def->prep(req, sqe);
  1819. }
  1820. static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
  1821. struct io_kiocb *req, int ret)
  1822. {
  1823. struct io_ring_ctx *ctx = req->ctx;
  1824. struct io_submit_link *link = &ctx->submit_state.link;
  1825. struct io_kiocb *head = link->head;
  1826. trace_io_uring_req_failed(sqe, req, ret);
  1827. /*
  1828. * Avoid breaking links in the middle as it renders links with SQPOLL
  1829. * unusable. Instead of failing eagerly, continue assembling the link if
  1830. * applicable and mark the head with REQ_F_FAIL. The link flushing code
  1831. * should find the flag and handle the rest.
  1832. */
  1833. req_fail_link_node(req, ret);
  1834. if (head && !(head->flags & REQ_F_FAIL))
  1835. req_fail_link_node(head, -ECANCELED);
  1836. if (!(req->flags & IO_REQ_LINK_FLAGS)) {
  1837. if (head) {
  1838. link->last->link = req;
  1839. link->head = NULL;
  1840. req = head;
  1841. }
  1842. io_queue_sqe_fallback(req);
  1843. return ret;
  1844. }
  1845. if (head)
  1846. link->last->link = req;
  1847. else
  1848. link->head = req;
  1849. link->last = req;
  1850. return 0;
  1851. }
  1852. static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
  1853. const struct io_uring_sqe *sqe)
  1854. __must_hold(&ctx->uring_lock)
  1855. {
  1856. struct io_submit_link *link = &ctx->submit_state.link;
  1857. int ret;
  1858. ret = io_init_req(ctx, req, sqe);
  1859. if (unlikely(ret))
  1860. return io_submit_fail_init(sqe, req, ret);
  1861. trace_io_uring_submit_req(req);
  1862. /*
  1863. * If we already have a head request, queue this one for async
  1864. * submittal once the head completes. If we don't have a head but
  1865. * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
  1866. * submitted sync once the chain is complete. If none of those
  1867. * conditions are true (normal request), then just queue it.
  1868. */
  1869. if (unlikely(link->head)) {
  1870. trace_io_uring_link(req, link->last);
  1871. link->last->link = req;
  1872. link->last = req;
  1873. if (req->flags & IO_REQ_LINK_FLAGS)
  1874. return 0;
  1875. /* last request of the link, flush it */
  1876. req = link->head;
  1877. link->head = NULL;
  1878. if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
  1879. goto fallback;
  1880. } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
  1881. REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
  1882. if (req->flags & IO_REQ_LINK_FLAGS) {
  1883. link->head = req;
  1884. link->last = req;
  1885. } else {
  1886. fallback:
  1887. io_queue_sqe_fallback(req);
  1888. }
  1889. return 0;
  1890. }
  1891. io_queue_sqe(req);
  1892. return 0;
  1893. }
  1894. /*
  1895. * Batched submission is done, ensure local IO is flushed out.
  1896. */
  1897. static void io_submit_state_end(struct io_ring_ctx *ctx)
  1898. {
  1899. struct io_submit_state *state = &ctx->submit_state;
  1900. if (unlikely(state->link.head))
  1901. io_queue_sqe_fallback(state->link.head);
  1902. /* flush only after queuing links as they can generate completions */
  1903. io_submit_flush_completions(ctx);
  1904. if (state->plug_started)
  1905. blk_finish_plug(&state->plug);
  1906. }
  1907. /*
  1908. * Start submission side cache.
  1909. */
  1910. static void io_submit_state_start(struct io_submit_state *state,
  1911. unsigned int max_ios)
  1912. {
  1913. state->plug_started = false;
  1914. state->need_plug = max_ios > 2;
  1915. state->submit_nr = max_ios;
  1916. /* set only head, no need to init link_last in advance */
  1917. state->link.head = NULL;
  1918. }
  1919. static void io_commit_sqring(struct io_ring_ctx *ctx)
  1920. {
  1921. struct io_rings *rings = ctx->rings;
  1922. /*
  1923. * Ensure any loads from the SQEs are done at this point,
  1924. * since once we write the new head, the application could
  1925. * write new data to them.
  1926. */
  1927. smp_store_release(&rings->sq.head, ctx->cached_sq_head);
  1928. }
  1929. /*
  1930. * Fetch an sqe, if one is available. Note this returns a pointer to memory
  1931. * that is mapped by userspace. This means that care needs to be taken to
  1932. * ensure that reads are stable, as we cannot rely on userspace always
  1933. * being a good citizen. If members of the sqe are validated and then later
  1934. * used, it's important that those reads are done through READ_ONCE() to
  1935. * prevent a re-load down the line.
  1936. */
  1937. static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
  1938. {
  1939. unsigned mask = ctx->sq_entries - 1;
  1940. unsigned head = ctx->cached_sq_head++ & mask;
  1941. if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) {
  1942. head = READ_ONCE(ctx->sq_array[head]);
  1943. if (unlikely(head >= ctx->sq_entries)) {
  1944. /* drop invalid entries */
  1945. spin_lock(&ctx->completion_lock);
  1946. ctx->cq_extra--;
  1947. spin_unlock(&ctx->completion_lock);
  1948. WRITE_ONCE(ctx->rings->sq_dropped,
  1949. READ_ONCE(ctx->rings->sq_dropped) + 1);
  1950. return false;
  1951. }
  1952. }
/*
 * The cached sq head (or cq tail) serves two purposes:
 *
 * 1) allows us to batch the cost of updating the user visible head.
 * 2) allows the kernel side to track the head on its own, even
 * though the application is the one updating it.
 */
  1961. /* double index for 128-byte SQEs, twice as long */
  1962. if (ctx->flags & IORING_SETUP_SQE128)
  1963. head <<= 1;
  1964. *sqe = &ctx->sq_sqes[head];
  1965. return true;
  1966. }
  1967. int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
  1968. __must_hold(&ctx->uring_lock)
  1969. {
  1970. unsigned int entries = io_sqring_entries(ctx);
  1971. unsigned int left;
  1972. int ret;
  1973. if (unlikely(!entries))
  1974. return 0;
  1975. /* make sure SQ entry isn't read before tail */
  1976. ret = left = min(nr, entries);
  1977. io_get_task_refs(left);
  1978. io_submit_state_start(&ctx->submit_state, left);
  1979. do {
  1980. const struct io_uring_sqe *sqe;
  1981. struct io_kiocb *req;
  1982. if (unlikely(!io_alloc_req(ctx, &req)))
  1983. break;
  1984. if (unlikely(!io_get_sqe(ctx, &sqe))) {
  1985. io_req_add_to_cache(req, ctx);
  1986. break;
  1987. }
/*
 * Continue submitting even for sqe failure if the
 * ring was set up with IORING_SETUP_SUBMIT_ALL.
 */
  1992. if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
  1993. !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
  1994. left--;
  1995. break;
  1996. }
  1997. } while (--left);
  1998. if (unlikely(left)) {
  1999. ret -= left;
  2000. /* try again if it submitted nothing and can't allocate a req */
  2001. if (!ret && io_req_cache_empty(ctx))
  2002. ret = -EAGAIN;
  2003. current->io_uring->cached_refs += left;
  2004. }
  2005. io_submit_state_end(ctx);
  2006. /* Commit SQ ring head once we've consumed and submitted all SQEs */
  2007. io_commit_sqring(ctx);
  2008. return ret;
  2009. }
  2010. static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
  2011. int wake_flags, void *key)
  2012. {
  2013. struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq);
  2014. /*
  2015. * Cannot safely flush overflowed CQEs from here, ensure we wake up
  2016. * the task, and the next invocation will do it.
  2017. */
  2018. if (io_should_wake(iowq) || io_has_work(iowq->ctx))
  2019. return autoremove_wake_function(curr, mode, wake_flags, key);
  2020. return -1;
  2021. }
  2022. int io_run_task_work_sig(struct io_ring_ctx *ctx)
  2023. {
  2024. if (!llist_empty(&ctx->work_llist)) {
  2025. __set_current_state(TASK_RUNNING);
  2026. if (io_run_local_work(ctx, INT_MAX) > 0)
  2027. return 0;
  2028. }
  2029. if (io_run_task_work() > 0)
  2030. return 0;
  2031. if (task_sigpending(current))
  2032. return -EINTR;
  2033. return 0;
  2034. }
  2035. static bool current_pending_io(void)
  2036. {
  2037. struct io_uring_task *tctx = current->io_uring;
  2038. if (!tctx)
  2039. return false;
  2040. return percpu_counter_read_positive(&tctx->inflight);
  2041. }
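/*
 * Final wakeup for a CQ wait timeout: record that the timeout fired and
 * wake the waiting task. The min_timeout variant below may instead re-arm
 * the timer with the full timeout rather than waking immediately.
 */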
  2042. static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer)
  2043. {
  2044. struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
  2045. WRITE_ONCE(iowq->hit_timeout, 1);
  2046. iowq->min_timeout = 0;
  2047. wake_up_process(iowq->wq.private);
  2048. return HRTIMER_NORESTART;
  2049. }
/*
 * Handle the min_timeout portion. If we saw any timeouts, events, or have
 * work, wake up. If not, and we have a normal timeout, switch to that and
 * keep sleeping.
 */
  2055. static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
  2056. {
  2057. struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
  2058. struct io_ring_ctx *ctx = iowq->ctx;
  2059. /* no general timeout, or shorter (or equal), we are done */
  2060. if (iowq->timeout == KTIME_MAX ||
  2061. ktime_compare(iowq->min_timeout, iowq->timeout) >= 0)
  2062. goto out_wake;
  2063. /* work we may need to run, wake function will see if we need to wake */
  2064. if (io_has_work(ctx))
  2065. goto out_wake;
  2066. /* got events since we started waiting, min timeout is done */
  2067. if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
  2068. goto out_wake;
  2069. /* if we have any events and min timeout expired, we're done */
  2070. if (io_cqring_events(ctx))
  2071. goto out_wake;
  2072. /*
  2073. * If using deferred task_work running and application is waiting on
  2074. * more than one request, ensure we reset it now where we are switching
  2075. * to normal sleeps. Any request completion post min_wait should wake
  2076. * the task and return.
  2077. */
  2078. if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
  2079. atomic_set(&ctx->cq_wait_nr, 1);
  2080. smp_mb();
  2081. if (!llist_empty(&ctx->work_llist))
  2082. goto out_wake;
  2083. }
  2084. iowq->t.function = io_cqring_timer_wakeup;
  2085. hrtimer_set_expires(timer, iowq->timeout);
  2086. return HRTIMER_RESTART;
  2087. out_wake:
  2088. return io_cqring_timer_wakeup(timer);
  2089. }
  2090. static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
  2091. clockid_t clock_id, ktime_t start_time)
  2092. {
  2093. ktime_t timeout;
  2094. hrtimer_init_on_stack(&iowq->t, clock_id, HRTIMER_MODE_ABS);
  2095. if (iowq->min_timeout) {
  2096. timeout = ktime_add_ns(iowq->min_timeout, start_time);
  2097. iowq->t.function = io_cqring_min_timer_wakeup;
  2098. } else {
  2099. timeout = iowq->timeout;
  2100. iowq->t.function = io_cqring_timer_wakeup;
  2101. }
  2102. hrtimer_set_expires_range_ns(&iowq->t, timeout, 0);
  2103. hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS);
  2104. if (!READ_ONCE(iowq->hit_timeout))
  2105. schedule();
  2106. hrtimer_cancel(&iowq->t);
  2107. destroy_hrtimer_on_stack(&iowq->t);
  2108. __set_current_state(TASK_RUNNING);
  2109. return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
  2110. }
  2111. static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
  2112. struct io_wait_queue *iowq,
  2113. ktime_t start_time)
  2114. {
  2115. int ret = 0;
  2116. /*
  2117. * Mark us as being in io_wait if we have pending requests, so cpufreq
  2118. * can take into account that the task is waiting for IO - turns out
  2119. * to be important for low QD IO.
  2120. */
  2121. if (current_pending_io())
  2122. current->in_iowait = 1;
  2123. if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
  2124. ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
  2125. else
  2126. schedule();
  2127. current->in_iowait = 0;
  2128. return ret;
  2129. }
  2130. /* If this returns > 0, the caller should retry */
  2131. static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
  2132. struct io_wait_queue *iowq,
  2133. ktime_t start_time)
  2134. {
  2135. if (unlikely(READ_ONCE(ctx->check_cq)))
  2136. return 1;
  2137. if (unlikely(!llist_empty(&ctx->work_llist)))
  2138. return 1;
  2139. if (unlikely(task_work_pending(current)))
  2140. return 1;
  2141. if (unlikely(task_sigpending(current)))
  2142. return -EINTR;
  2143. if (unlikely(io_should_wake(iowq)))
  2144. return 0;
  2145. return __io_cqring_wait_schedule(ctx, iowq, start_time);
  2146. }
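/*
 * Extended wait arguments supplied by userspace through io_uring_enter():
 * the signal mask and its size, an optional absolute or relative timeout,
 * and a minimum wait time used for the two-stage timer above.
 */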
  2147. struct ext_arg {
  2148. size_t argsz;
  2149. struct __kernel_timespec __user *ts;
  2150. const sigset_t __user *sig;
  2151. ktime_t min_time;
  2152. };
  2153. /*
  2154. * Wait until events become available, if we don't already have some. The
  2155. * application must reap them itself, as they reside on the shared cq ring.
  2156. */
  2157. static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
  2158. struct ext_arg *ext_arg)
  2159. {
  2160. struct io_wait_queue iowq;
  2161. struct io_rings *rings = ctx->rings;
  2162. ktime_t start_time;
  2163. int ret;
  2164. if (!io_allowed_run_tw(ctx))
  2165. return -EEXIST;
  2166. if (!llist_empty(&ctx->work_llist))
  2167. io_run_local_work(ctx, min_events);
  2168. io_run_task_work();
  2169. if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
  2170. io_cqring_do_overflow_flush(ctx);
  2171. if (__io_cqring_events_user(ctx) >= min_events)
  2172. return 0;
  2173. init_waitqueue_func_entry(&iowq.wq, io_wake_function);
  2174. iowq.wq.private = current;
  2175. INIT_LIST_HEAD(&iowq.wq.entry);
  2176. iowq.ctx = ctx;
  2177. iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
  2178. iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
  2179. iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
  2180. iowq.hit_timeout = 0;
  2181. iowq.min_timeout = ext_arg->min_time;
  2182. iowq.timeout = KTIME_MAX;
  2183. start_time = io_get_time(ctx);
  2184. if (ext_arg->ts) {
  2185. struct timespec64 ts;
  2186. if (get_timespec64(&ts, ext_arg->ts))
  2187. return -EFAULT;
  2188. iowq.timeout = timespec64_to_ktime(ts);
  2189. if (!(flags & IORING_ENTER_ABS_TIMER))
  2190. iowq.timeout = ktime_add(iowq.timeout, start_time);
  2191. }
  2192. if (ext_arg->sig) {
  2193. #ifdef CONFIG_COMPAT
  2194. if (in_compat_syscall())
  2195. ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig,
  2196. ext_arg->argsz);
  2197. else
  2198. #endif
  2199. ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz);
  2200. if (ret)
  2201. return ret;
  2202. }
  2203. io_napi_busy_loop(ctx, &iowq);
  2204. trace_io_uring_cqring_wait(ctx, min_events);
  2205. do {
  2206. unsigned long check_cq;
  2207. int nr_wait;
  2208. /* if min timeout has been hit, don't reset wait count */
  2209. if (!iowq.hit_timeout)
  2210. nr_wait = (int) iowq.cq_tail -
  2211. READ_ONCE(ctx->rings->cq.tail);
  2212. else
  2213. nr_wait = 1;
  2214. if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
  2215. atomic_set(&ctx->cq_wait_nr, nr_wait);
  2216. set_current_state(TASK_INTERRUPTIBLE);
  2217. } else {
  2218. prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
  2219. TASK_INTERRUPTIBLE);
  2220. }
  2221. ret = io_cqring_wait_schedule(ctx, &iowq, start_time);
  2222. __set_current_state(TASK_RUNNING);
  2223. atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
  2224. /*
  2225. * Run task_work after scheduling and before io_should_wake().
  2226. * If we got woken because of task_work being processed, run it
  2227. * now rather than let the caller do another wait loop.
  2228. */
  2229. if (!llist_empty(&ctx->work_llist))
  2230. io_run_local_work(ctx, nr_wait);
  2231. io_run_task_work();
  2232. /*
  2233. * Non-local task_work will be run on exit to userspace, but
  2234. * if we're using DEFER_TASKRUN, then we could have waited
  2235. * with a timeout for a number of requests. If the timeout
  2236. * hits, we could have some requests ready to process. Ensure
  2237. * this break is _after_ we have run task_work, to avoid
  2238. * deferring running potentially pending requests until the
  2239. * next time we wait for events.
  2240. */
  2241. if (ret < 0)
  2242. break;
  2243. check_cq = READ_ONCE(ctx->check_cq);
  2244. if (unlikely(check_cq)) {
  2245. /* let the caller flush overflows, retry */
  2246. if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
  2247. io_cqring_do_overflow_flush(ctx);
  2248. if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
  2249. ret = -EBADR;
  2250. break;
  2251. }
  2252. }
  2253. if (io_should_wake(&iowq)) {
  2254. ret = 0;
  2255. break;
  2256. }
  2257. cond_resched();
  2258. } while (1);
  2259. if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
  2260. finish_wait(&ctx->cq_wait, &iowq.wq);
  2261. restore_saved_sigmask_unless(ret == -EINTR);
  2262. return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
  2263. }
  2264. static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
  2265. size_t size)
  2266. {
  2267. return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr,
  2268. size);
  2269. }
  2270. static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
  2271. size_t size)
  2272. {
  2273. return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
  2274. size);
  2275. }
  2276. static void io_rings_free(struct io_ring_ctx *ctx)
  2277. {
  2278. if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
  2279. io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages,
  2280. true);
  2281. io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages,
  2282. true);
  2283. } else {
  2284. io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
  2285. ctx->n_ring_pages = 0;
  2286. io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
  2287. ctx->n_sqe_pages = 0;
  2288. vunmap(ctx->rings);
  2289. vunmap(ctx->sq_sqes);
  2290. }
  2291. ctx->rings = NULL;
  2292. ctx->sq_sqes = NULL;
  2293. }
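/*
 * Compute the allocation size for the rings: the CQE array (doubled for
 * CQE32), cache-line aligned on SMP, plus the SQ index array unless the
 * ring was created with IORING_SETUP_NO_SQARRAY. The SQ array offset is
 * returned via *sq_offset; overflow at any step yields SIZE_MAX.
 */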
  2294. static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
  2295. unsigned int cq_entries, size_t *sq_offset)
  2296. {
  2297. struct io_rings *rings;
  2298. size_t off, sq_array_size;
  2299. off = struct_size(rings, cqes, cq_entries);
  2300. if (off == SIZE_MAX)
  2301. return SIZE_MAX;
  2302. if (ctx->flags & IORING_SETUP_CQE32) {
  2303. if (check_shl_overflow(off, 1, &off))
  2304. return SIZE_MAX;
  2305. }
  2306. #ifdef CONFIG_SMP
  2307. off = ALIGN(off, SMP_CACHE_BYTES);
  2308. if (off == 0)
  2309. return SIZE_MAX;
  2310. #endif
  2311. if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
  2312. *sq_offset = SIZE_MAX;
  2313. return off;
  2314. }
  2315. *sq_offset = off;
  2316. sq_array_size = array_size(sizeof(u32), sq_entries);
  2317. if (sq_array_size == SIZE_MAX)
  2318. return SIZE_MAX;
  2319. if (check_add_overflow(off, sq_array_size, &off))
  2320. return SIZE_MAX;
  2321. return off;
  2322. }
  2323. static void io_req_caches_free(struct io_ring_ctx *ctx)
  2324. {
  2325. struct io_kiocb *req;
  2326. int nr = 0;
  2327. mutex_lock(&ctx->uring_lock);
  2328. while (!io_req_cache_empty(ctx)) {
  2329. req = io_extract_req(ctx);
  2330. kmem_cache_free(req_cachep, req);
  2331. nr++;
  2332. }
  2333. if (nr)
  2334. percpu_ref_put_many(&ctx->refs, nr);
  2335. mutex_unlock(&ctx->uring_lock);
  2336. }
  2337. static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
  2338. {
  2339. io_sq_thread_finish(ctx);
  2340. /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
  2341. if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
  2342. return;
  2343. mutex_lock(&ctx->uring_lock);
  2344. if (ctx->buf_data)
  2345. __io_sqe_buffers_unregister(ctx);
  2346. if (ctx->file_data)
  2347. __io_sqe_files_unregister(ctx);
  2348. io_cqring_overflow_kill(ctx);
  2349. io_eventfd_unregister(ctx);
  2350. io_alloc_cache_free(&ctx->apoll_cache, kfree);
  2351. io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
  2352. io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
  2353. io_alloc_cache_free(&ctx->uring_cache, kfree);
  2354. io_futex_cache_free(ctx);
  2355. io_destroy_buffers(ctx);
  2356. mutex_unlock(&ctx->uring_lock);
  2357. if (ctx->sq_creds)
  2358. put_cred(ctx->sq_creds);
  2359. if (ctx->submitter_task)
  2360. put_task_struct(ctx->submitter_task);
  2361. /* there are no registered resources left, nobody uses it */
  2362. if (ctx->rsrc_node)
  2363. io_rsrc_node_destroy(ctx, ctx->rsrc_node);
  2364. WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
  2365. WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
  2366. io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
  2367. if (ctx->mm_account) {
  2368. mmdrop(ctx->mm_account);
  2369. ctx->mm_account = NULL;
  2370. }
  2371. io_rings_free(ctx);
  2372. percpu_ref_exit(&ctx->refs);
  2373. free_uid(ctx->user);
  2374. io_req_caches_free(ctx);
  2375. if (ctx->hash_map)
  2376. io_wq_put_hash(ctx->hash_map);
  2377. io_napi_free(ctx);
  2378. kfree(ctx->cancel_table.hbs);
  2379. kfree(ctx->cancel_table_locked.hbs);
  2380. xa_destroy(&ctx->io_bl_xa);
  2381. kfree(ctx);
  2382. }
  2383. static __cold void io_activate_pollwq_cb(struct callback_head *cb)
  2384. {
  2385. struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
  2386. poll_wq_task_work);
  2387. mutex_lock(&ctx->uring_lock);
  2388. ctx->poll_activated = true;
  2389. mutex_unlock(&ctx->uring_lock);
  2390. /*
  2391. * Wake ups for some events between start of polling and activation
  2392. * might've been lost due to loose synchronisation.
  2393. */
  2394. wake_up_all(&ctx->poll_wq);
  2395. percpu_ref_put(&ctx->refs);
  2396. }
  2397. __cold void io_activate_pollwq(struct io_ring_ctx *ctx)
  2398. {
  2399. spin_lock(&ctx->completion_lock);
  2400. /* already activated or in progress */
  2401. if (ctx->poll_activated || ctx->poll_wq_task_work.func)
  2402. goto out;
  2403. if (WARN_ON_ONCE(!ctx->task_complete))
  2404. goto out;
  2405. if (!ctx->submitter_task)
  2406. goto out;
/*
 * With ->submitter_task, only the submitter task completes requests, so
 * we only need to sync with it, which is done by injecting a task_work.
 */
  2411. init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
  2412. percpu_ref_get(&ctx->refs);
  2413. if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
  2414. percpu_ref_put(&ctx->refs);
  2415. out:
  2416. spin_unlock(&ctx->completion_lock);
  2417. }
  2418. static __poll_t io_uring_poll(struct file *file, poll_table *wait)
  2419. {
  2420. struct io_ring_ctx *ctx = file->private_data;
  2421. __poll_t mask = 0;
  2422. if (unlikely(!ctx->poll_activated))
  2423. io_activate_pollwq(ctx);
  2424. poll_wait(file, &ctx->poll_wq, wait);
  2425. /*
  2426. * synchronizes with barrier from wq_has_sleeper call in
  2427. * io_commit_cqring
  2428. */
  2429. smp_rmb();
  2430. if (!io_sqring_full(ctx))
  2431. mask |= EPOLLOUT | EPOLLWRNORM;
/*
 * Don't flush the cqring overflow list here, just do a simple check.
 * Otherwise there could possibly be an ABBA deadlock:
 *      CPU0                    CPU1
 *      ----                    ----
 * lock(&ctx->uring_lock);
 *                              lock(&ep->mtx);
 *                              lock(&ctx->uring_lock);
 * lock(&ep->mtx);
 *
 * Users may get EPOLLIN meanwhile seeing nothing in the cqring; this
 * pushes them to do the flush.
 */
  2445. if (__io_cqring_events_user(ctx) || io_has_work(ctx))
  2446. mask |= EPOLLIN | EPOLLRDNORM;
  2447. return mask;
  2448. }
  2449. struct io_tctx_exit {
  2450. struct callback_head task_work;
  2451. struct completion completion;
  2452. struct io_ring_ctx *ctx;
  2453. };
  2454. static __cold void io_tctx_exit_cb(struct callback_head *cb)
  2455. {
  2456. struct io_uring_task *tctx = current->io_uring;
  2457. struct io_tctx_exit *work;
  2458. work = container_of(cb, struct io_tctx_exit, task_work);
  2459. /*
  2460. * When @in_cancel, we're in cancellation and it's racy to remove the
  2461. * node. It'll be removed by the end of cancellation, just ignore it.
  2462. * tctx can be NULL if the queueing of this task_work raced with
  2463. * work cancelation off the exec path.
  2464. */
  2465. if (tctx && !atomic_read(&tctx->in_cancel))
  2466. io_uring_del_tctx_node((unsigned long)work->ctx);
  2467. complete(&work->completion);
  2468. }
  2469. static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
  2470. {
  2471. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  2472. return req->ctx == data;
  2473. }
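/*
 * Workqueue function that tears down a dying ring. It keeps cancelling:
 * killing CQ overflows, moving deferred local work to normal task_work,
 * cancelling requests (including through the SQPOLL thread's io-wq) and
 * freeing request caches, until the final ctx reference is dropped
 * (ctx->ref_comp). Then it detaches every tctx node and frees the context.
 */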
  2474. static __cold void io_ring_exit_work(struct work_struct *work)
  2475. {
  2476. struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
  2477. unsigned long timeout = jiffies + HZ * 60 * 5;
  2478. unsigned long interval = HZ / 20;
  2479. struct io_tctx_exit exit;
  2480. struct io_tctx_node *node;
  2481. int ret;
  2482. /*
  2483. * If we're doing polled IO and end up having requests being
  2484. * submitted async (out-of-line), then completions can come in while
  2485. * we're waiting for refs to drop. We need to reap these manually,
  2486. * as nobody else will be looking for them.
  2487. */
  2488. do {
  2489. if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
  2490. mutex_lock(&ctx->uring_lock);
  2491. io_cqring_overflow_kill(ctx);
  2492. mutex_unlock(&ctx->uring_lock);
  2493. }
  2494. if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
  2495. io_move_task_work_from_local(ctx);
  2496. while (io_uring_try_cancel_requests(ctx, NULL, true))
  2497. cond_resched();
  2498. if (ctx->sq_data) {
  2499. struct io_sq_data *sqd = ctx->sq_data;
  2500. struct task_struct *tsk;
  2501. io_sq_thread_park(sqd);
  2502. tsk = sqpoll_task_locked(sqd);
  2503. if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
  2504. io_wq_cancel_cb(tsk->io_uring->io_wq,
  2505. io_cancel_ctx_cb, ctx, true);
  2506. io_sq_thread_unpark(sqd);
  2507. }
  2508. io_req_caches_free(ctx);
  2509. if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
  2510. /* there is little hope left, don't run it too often */
  2511. interval = HZ * 60;
  2512. }
  2513. /*
  2514. * This is really an uninterruptible wait, as it has to be
  2515. * complete. But it's also run from a kworker, which doesn't
  2516. * take signals, so it's fine to make it interruptible. This
  2517. * avoids scenarios where we knowingly can wait much longer
  2518. * on completions, for example if someone does a SIGSTOP on
  2519. * a task that needs to finish task_work to make this loop
  2520. * complete. That's a synthetic situation that should not
  2521. * cause a stuck task backtrace, and hence a potential panic
  2522. * on stuck tasks if that is enabled.
  2523. */
  2524. } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
  2525. init_completion(&exit.completion);
  2526. init_task_work(&exit.task_work, io_tctx_exit_cb);
  2527. exit.ctx = ctx;
  2528. mutex_lock(&ctx->uring_lock);
  2529. while (!list_empty(&ctx->tctx_list)) {
  2530. WARN_ON_ONCE(time_after(jiffies, timeout));
  2531. node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
  2532. ctx_node);
  2533. /* don't spin on a single task if cancellation failed */
  2534. list_rotate_left(&ctx->tctx_list);
  2535. ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
  2536. if (WARN_ON_ONCE(ret))
  2537. continue;
  2538. mutex_unlock(&ctx->uring_lock);
  2539. /*
  2540. * See comment above for
  2541. * wait_for_completion_interruptible_timeout() on why this
  2542. * wait is marked as interruptible.
  2543. */
  2544. wait_for_completion_interruptible(&exit.completion);
  2545. mutex_lock(&ctx->uring_lock);
  2546. }
  2547. mutex_unlock(&ctx->uring_lock);
  2548. spin_lock(&ctx->completion_lock);
  2549. spin_unlock(&ctx->completion_lock);
  2550. /* pairs with RCU read section in io_req_local_work_add() */
  2551. if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
  2552. synchronize_rcu();
  2553. io_ring_ctx_free(ctx);
  2554. }
  2555. static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
  2556. {
  2557. unsigned long index;
  2558. struct creds *creds;
  2559. mutex_lock(&ctx->uring_lock);
  2560. percpu_ref_kill(&ctx->refs);
  2561. xa_for_each(&ctx->personalities, index, creds)
  2562. io_unregister_personality(ctx, index);
  2563. mutex_unlock(&ctx->uring_lock);
  2564. flush_delayed_work(&ctx->fallback_work);
  2565. INIT_WORK(&ctx->exit_work, io_ring_exit_work);
/*
 * Use system_unbound_wq to avoid spawning tons of event kworkers
 * if we're exiting a ton of rings at the same time. It just adds
 * noise and overhead; there's no discernible change in runtime
 * over using system_wq.
 */
  2572. queue_work(iou_wq, &ctx->exit_work);
  2573. }
  2574. static int io_uring_release(struct inode *inode, struct file *file)
  2575. {
  2576. struct io_ring_ctx *ctx = file->private_data;
  2577. file->private_data = NULL;
  2578. io_ring_ctx_wait_and_kill(ctx);
  2579. return 0;
  2580. }
  2581. struct io_task_cancel {
  2582. struct task_struct *task;
  2583. bool all;
  2584. };
  2585. static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
  2586. {
  2587. struct io_kiocb *req = container_of(work, struct io_kiocb, work);
  2588. struct io_task_cancel *cancel = data;
  2589. return io_match_task_safe(req, cancel->task, cancel->all);
  2590. }
  2591. static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
  2592. struct task_struct *task,
  2593. bool cancel_all)
  2594. {
  2595. struct io_defer_entry *de;
  2596. LIST_HEAD(list);
  2597. spin_lock(&ctx->completion_lock);
  2598. list_for_each_entry_reverse(de, &ctx->defer_list, list) {
  2599. if (io_match_task_safe(de->req, task, cancel_all)) {
  2600. list_cut_position(&list, &ctx->defer_list, &de->list);
  2601. break;
  2602. }
  2603. }
  2604. spin_unlock(&ctx->completion_lock);
  2605. if (list_empty(&list))
  2606. return false;
  2607. while (!list_empty(&list)) {
  2608. de = list_first_entry(&list, struct io_defer_entry, list);
  2609. list_del_init(&de->list);
  2610. io_req_task_queue_fail(de->req, -ECANCELED);
  2611. kfree(de);
  2612. }
  2613. return true;
  2614. }
  2615. static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
  2616. {
  2617. struct io_tctx_node *node;
  2618. enum io_wq_cancel cret;
  2619. bool ret = false;
  2620. mutex_lock(&ctx->uring_lock);
  2621. list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
  2622. struct io_uring_task *tctx = node->task->io_uring;
/*
 * io_wq will stay alive while we hold uring_lock, because it's
 * killed after the ctx nodes, which requires taking the lock.
 */
  2627. if (!tctx || !tctx->io_wq)
  2628. continue;
  2629. cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
  2630. ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
  2631. }
  2632. mutex_unlock(&ctx->uring_lock);
  2633. return ret;
  2634. }
static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
						struct task_struct *task,
						bool cancel_all)
{
	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
	struct io_uring_task *tctx = task ? task->io_uring : NULL;
	enum io_wq_cancel cret;
	bool ret = false;

	/* set it so io_req_local_work_add() would wake us up */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	/* failed during ring init, it couldn't have issued any requests */
	if (!ctx->rings)
		return false;

	if (!task) {
		ret |= io_uring_try_cancel_iowq(ctx);
	} else if (tctx && tctx->io_wq) {
		/*
		 * Cancels requests of all rings, not only @ctx, but
		 * it's fine as the task is in exit/exec.
		 */
		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
				       &cancel, true);
		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
	}

	/* SQPOLL thread does its own polling */
	if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
	    (ctx->sq_data && ctx->sq_data->thread == current)) {
		while (!wq_list_empty(&ctx->iopoll_list)) {
			io_iopoll_try_reap_events(ctx);
			ret = true;
			cond_resched();
		}
	}

	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
	    io_allowed_defer_tw_run(ctx))
		ret |= io_run_local_work(ctx, INT_MAX) > 0;
	ret |= io_cancel_defer_files(ctx, task, cancel_all);
	mutex_lock(&ctx->uring_lock);
	ret |= io_poll_remove_all(ctx, task, cancel_all);
	ret |= io_waitid_remove_all(ctx, task, cancel_all);
	ret |= io_futex_remove_all(ctx, task, cancel_all);
	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
	mutex_unlock(&ctx->uring_lock);
	ret |= io_kill_timeouts(ctx, task, cancel_all);
	if (task)
		ret |= io_run_task_work() > 0;
	else
		ret |= flush_delayed_work(&ctx->fallback_work);
	return ret;
}

static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
{
	if (tracked)
		return atomic_read(&tctx->inflight_tracked);
	return percpu_counter_sum(&tctx->inflight);
}

/*
 * Find any io_uring ctx that this task has registered or done IO on, and cancel
 * requests. @sqd should be non-NULL iff this is an SQPOLL thread cancellation.
 */
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_ring_ctx *ctx;
	struct io_tctx_node *node;
	unsigned long index;
	s64 inflight;
	DEFINE_WAIT(wait);

	WARN_ON_ONCE(sqd && sqpoll_task_locked(sqd) != current);

	if (!current->io_uring)
		return;
	if (tctx->io_wq)
		io_wq_exit_start(tctx->io_wq);

	atomic_inc(&tctx->in_cancel);
	do {
		bool loop = false;

		io_uring_drop_tctx_refs(current);
		if (!tctx_inflight(tctx, !cancel_all))
			break;

		/* read completions before cancellations */
		inflight = tctx_inflight(tctx, false);
		if (!inflight)
			break;

		if (!sqd) {
			xa_for_each(&tctx->xa, index, node) {
				/* sqpoll task will cancel all its requests */
				if (node->ctx->sq_data)
					continue;
				loop |= io_uring_try_cancel_requests(node->ctx,
							current, cancel_all);
			}
		} else {
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				loop |= io_uring_try_cancel_requests(ctx,
								     current,
								     cancel_all);
		}

		if (loop) {
			cond_resched();
			continue;
		}

		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
		io_run_task_work();
		io_uring_drop_tctx_refs(current);
		xa_for_each(&tctx->xa, index, node) {
			if (!llist_empty(&node->ctx->work_llist)) {
				WARN_ON_ONCE(node->ctx->submitter_task &&
					     node->ctx->submitter_task != current);
				goto end_wait;
			}
		}
		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight == tctx_inflight(tctx, !cancel_all))
			schedule();
end_wait:
		finish_wait(&tctx->wait, &wait);
	} while (1);

	io_uring_clean_tctx(tctx);
	if (cancel_all) {
		/*
		 * We shouldn't run task_works after cancel, so just leave
		 * ->in_cancel set for normal exit.
		 */
		atomic_dec(&tctx->in_cancel);
		/* for exec all current's requests should be gone, kill tctx */
		__io_uring_free(current);
	}
}

void __io_uring_cancel(bool cancel_all)
{
	io_uring_unreg_ringfd();
	io_uring_cancel_generic(cancel_all, NULL);
}

static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
{
	if (flags & IORING_ENTER_EXT_ARG) {
		struct io_uring_getevents_arg arg;

		if (argsz != sizeof(arg))
			return -EINVAL;
		if (copy_from_user(&arg, argp, sizeof(arg)))
			return -EFAULT;
	}
	return 0;
}

static int io_get_ext_arg(unsigned flags, const void __user *argp,
			  struct ext_arg *ext_arg)
{
	struct io_uring_getevents_arg arg;

	/*
	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
	 * is just a pointer to the sigset_t.
	 */
	if (!(flags & IORING_ENTER_EXT_ARG)) {
		ext_arg->sig = (const sigset_t __user *) argp;
		ext_arg->ts = NULL;
		return 0;
	}

	/*
	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
	 * timespec and sigset_t pointers if good.
	 */
	if (ext_arg->argsz != sizeof(arg))
		return -EINVAL;
	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;
	ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
	ext_arg->sig = u64_to_user_ptr(arg.sigmask);
	ext_arg->argsz = arg.sigmask_sz;
	ext_arg->ts = u64_to_user_ptr(arg.ts);
	return 0;
}

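/*
 * Example (userspace sketch, not part of this file): with
 * IORING_ENTER_EXT_ARG set, argp must point at a struct
 * io_uring_getevents_arg and argsz must be the size of that struct,
 * matching the checks above. Field names follow the UAPI header; the
 * raw-syscall call shape is an illustration only, with no signal mask
 * passed.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask = 0,
 *		.sigmask_sz = 0,
 *		.min_wait_usec = 0,
 *		.ts = (__u64)(unsigned long)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */
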
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const void __user *, argp,
		size_t, argsz)
{
	struct io_ring_ctx *ctx;
	struct file *file;
	long ret;

	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
			       IORING_ENTER_REGISTERED_RING |
			       IORING_ENTER_ABS_TIMER)))
		return -EINVAL;

	/*
	 * If the ring fd has been registered via IORING_REGISTER_RING_FDS, we
	 * need only dereference our task-private array to find it.
	 */
	if (flags & IORING_ENTER_REGISTERED_RING) {
		struct io_uring_task *tctx = current->io_uring;

		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
			return -EINVAL;
		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
		file = tctx->registered_rings[fd];
		if (unlikely(!file))
			return -EBADF;
	} else {
		file = fget(fd);
		if (unlikely(!file))
			return -EBADF;
		ret = -EOPNOTSUPP;
		if (unlikely(!io_is_uring_fops(file)))
			goto out;
	}

	ctx = file->private_data;
	ret = -EBADFD;
	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
		goto out;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (unlikely(ctx->sq_data->thread == NULL)) {
			ret = -EOWNERDEAD;
			goto out;
		}
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sq_data->wait);
		if (flags & IORING_ENTER_SQ_WAIT)
			io_sqpoll_wait_sq(ctx);

		ret = to_submit;
	} else if (to_submit) {
		ret = io_uring_add_tctx_node(ctx);
		if (unlikely(ret))
			goto out;

		mutex_lock(&ctx->uring_lock);
		ret = io_submit_sqes(ctx, to_submit);
		if (ret != to_submit) {
			mutex_unlock(&ctx->uring_lock);
			goto out;
		}
		if (flags & IORING_ENTER_GETEVENTS) {
			if (ctx->syscall_iopoll)
				goto iopoll_locked;
			/*
			 * Ignore errors; we'll soon call io_cqring_wait() and
			 * it should handle ownership problems, if any.
			 */
			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
				(void)io_run_local_work_locked(ctx, min_complete);
		}
		mutex_unlock(&ctx->uring_lock);
	}

	if (flags & IORING_ENTER_GETEVENTS) {
		int ret2;

		if (ctx->syscall_iopoll) {
			/*
			 * We disallow the app entering submit/complete with
			 * polling, but we still need to lock the ring to
			 * prevent racing with polled issue that got punted to
			 * a workqueue.
			 */
			mutex_lock(&ctx->uring_lock);
iopoll_locked:
			ret2 = io_validate_ext_arg(flags, argp, argsz);
			if (likely(!ret2)) {
				min_complete = min(min_complete,
						   ctx->cq_entries);
				ret2 = io_iopoll_check(ctx, min_complete);
			}
			mutex_unlock(&ctx->uring_lock);
		} else {
			struct ext_arg ext_arg = { .argsz = argsz };

			ret2 = io_get_ext_arg(flags, argp, &ext_arg);
			if (likely(!ret2)) {
				min_complete = min(min_complete,
						   ctx->cq_entries);
				ret2 = io_cqring_wait(ctx, min_complete, flags,
						      &ext_arg);
			}
		}

		if (!ret) {
			ret = ret2;

			/*
			 * EBADR indicates that one or more CQEs were dropped.
			 * Once the user has been informed we can clear the bit
			 * as they are obviously ok with those drops.
			 */
			if (unlikely(ret2 == -EBADR))
				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
					  &ctx->check_cq);
		}
	}
out:
	if (!(flags & IORING_ENTER_REGISTERED_RING))
		fput(file);
	return ret;
}

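/*
 * Example (userspace sketch, not part of this file): submit whatever is
 * queued in the SQ ring and wait for at least one completion. The raw
 * syscall mirrors the prototype above; __NR_io_uring_enter is assumed to
 * come from the libc headers and error handling is omitted.
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * For an SQPOLL ring the same call would typically pass
 * IORING_ENTER_SQ_WAKEUP when the SQ ring flags indicate that the poller
 * thread needs waking, since submission itself is done by that thread.
 */
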
static const struct file_operations io_uring_fops = {
	.release = io_uring_release,
	.mmap = io_uring_mmap,
	.get_unmapped_area = io_uring_get_unmapped_area,
#ifndef CONFIG_MMU
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll = io_uring_poll,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = io_uring_show_fdinfo,
#endif
};

bool io_is_uring_fops(struct file *file)
{
	return file->f_op == &io_uring_fops;
}

static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
					 struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;
	void *ptr;

	/* make sure these are sane, as we already accounted them */
	ctx->sq_entries = p->sq_entries;
	ctx->cq_entries = p->cq_entries;

	size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		rings = io_pages_map(&ctx->ring_pages, &ctx->n_ring_pages, size);
	else
		rings = io_rings_map(ctx, p->cq_off.user_addr, size);

	if (IS_ERR(rings))
		return PTR_ERR(rings);

	ctx->rings = rings;
	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
		ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;

	if (p->flags & IORING_SETUP_SQE128)
		size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
	else
		size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_rings_free(ctx);
		return -EOVERFLOW;
	}

	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size);
	else
		ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);

	if (IS_ERR(ptr)) {
		io_rings_free(ctx);
		return PTR_ERR(ptr);
	}

	ctx->sq_sqes = ptr;
	return 0;
}

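/*
 * Sizing note (illustrative, derived from the code above): a regular SQE is
 * 64 bytes, as enforced by the BUILD_BUG_ON() in io_uring_init() below, so a
 * ring created with sq_entries == 128 needs 128 * 64 = 8 KiB for the SQE
 * array, or 16 KiB when IORING_SETUP_SQE128 doubles the per-entry size.
 */
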
static int io_uring_install_fd(struct file *file)
{
	int fd;

	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return fd;
	fd_install(fd, file);
	return fd;
}

/*
 * Allocate an anonymous fd; this is what constitutes the application-visible
 * backing of an io_uring instance. The application mmaps this fd to gain
 * access to the SQ/CQ ring details.
 */
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
	/* Create a new inode so that the LSM can block the creation. */
	return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
					 O_RDWR | O_CLOEXEC, NULL);
}

static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
				  struct io_uring_params __user *params)
{
	struct io_ring_ctx *ctx;
	struct io_uring_task *tctx;
	struct file *file;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
	    && !(p->flags & IORING_SETUP_NO_MMAP))
		return -EINVAL;

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

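	/*
	 * Worked example (illustrative): entries == 100 without
	 * IORING_SETUP_CQSIZE is rounded up to sq_entries == 128 and gets
	 * cq_entries == 256. With IORING_SETUP_CQSIZE and cq_entries == 100,
	 * the CQ ring is instead rounded up to 128, which still satisfies
	 * the cq_entries >= sq_entries check above.
	 */
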
	ctx = io_ring_ctx_alloc(p);
	if (!ctx)
		return -ENOMEM;

	ctx->clockid = CLOCK_MONOTONIC;
	ctx->clock_offset = 0;

	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
	    !(ctx->flags & IORING_SETUP_IOPOLL) &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->task_complete = true;

	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
		ctx->lockless_cq = true;

	/*
	 * lazy poll_wq activation relies on ->task_complete for synchronisation
	 * purposes, see io_activate_pollwq()
	 */
	if (!ctx->task_complete)
		ctx->poll_activated = true;

	/*
	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
	 * applications don't need to poll for completion events themselves;
	 * they can rely on io_sq_thread to do that polling work, which
	 * reduces cpu usage and uring_lock contention.
	 */
	if (ctx->flags & IORING_SETUP_IOPOLL &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->syscall_iopoll = 1;

	ctx->compat = in_compat_syscall();
	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());

	/*
	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
	 */
	ret = -EINVAL;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		/* IPI related flags don't make sense with SQPOLL */
		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
				  IORING_SETUP_TASKRUN_FLAG |
				  IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL;
	}

	/*
	 * For DEFER_TASKRUN we require the completion task to be the same as
	 * the submission task. This implies that there is only one submitter,
	 * so enforce that.
	 */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
	    !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
		goto err;
	}

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;

	ret = io_rsrc_init(ctx);
	if (ret)
		goto err;

	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
		p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
	p->sq_off.resv1 = 0;
	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		p->sq_off.user_addr = 0;

	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
	p->cq_off.resv1 = 0;
	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		p->cq_off.user_addr = 0;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
			IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
			IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		goto err_fput;
	tctx = current->io_uring;

	/*
	 * Install the ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup.
	 */
	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
		ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
	else
		ret = io_uring_install_fd(file);
	if (ret < 0)
		goto err_fput;

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
err_fput:
	fput(file);
	return ret;
}

/*
 * Sets up an io_uring context, and returns the fd. The application asks for a
 * ring size; we return the actual sq/cq ring sizes (among other things) in the
 * params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
			IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
			IORING_SETUP_NO_SQARRAY))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

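/*
 * Example (userspace sketch, not part of this file): create a ring and check
 * a feature bit. __NR_io_uring_setup is assumed to come from the libc
 * headers; error handling is omitted.
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 *	if (ring_fd >= 0 && (p.features & IORING_FEAT_EXT_ARG))
 *		;	// IORING_ENTER_EXT_ARG is supported here
 *
 * On return, p.sq_off and p.cq_off hold the offsets filled in by
 * io_uring_create() for mmap()ing the SQ/CQ rings.
 */
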
static inline bool io_uring_allowed(void)
{
	int disabled = READ_ONCE(sysctl_io_uring_disabled);
	kgid_t io_uring_group;

	if (disabled == 2)
		return false;

	if (disabled == 0 || capable(CAP_SYS_ADMIN))
		return true;

	io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
	if (!gid_valid(io_uring_group))
		return false;

	return in_group_p(io_uring_group);
}

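/*
 * Summary of the policy above: kernel.io_uring_disabled == 0 allows all
 * callers, == 2 rejects everyone, and == 1 allows only tasks with
 * CAP_SYS_ADMIN or membership in kernel.io_uring_group (when that sysctl
 * names a valid group).
 */
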
SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	if (!io_uring_allowed())
		return -EPERM;

	return io_uring_setup(entries, params);
}

static int __init io_uring_init(void)
{
	struct kmem_cache_args kmem_args = {
		.useroffset = offsetof(struct io_kiocb, cmd.data),
		.usersize = sizeof_field(struct io_kiocb, cmd.data),
	};

#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)

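	/*
	 * For illustration, BUILD_BUG_SQE_ELEM(0, __u8, opcode) expands to:
	 *
	 *	BUILD_BUG_ON(offsetof(struct io_uring_sqe, opcode) != 0);
	 *	BUILD_BUG_ON(sizeof_field(struct io_uring_sqe, opcode) != sizeof(__u8));
	 *
	 * so each line below pins both the offset and the size of one SQE
	 * field, keeping the UAPI layout from changing silently.
	 */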
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(8, __u32, cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, rename_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags);
	BUILD_BUG_SQE_ELEM(32, __u64, user_data);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16, personality);
	BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32, file_index);
	BUILD_BUG_SQE_ELEM(44, __u16, addr_len);
	BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]);
	BUILD_BUG_SQE_ELEM(48, __u64, addr3);
	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
	BUILD_BUG_SQE_ELEM(56, __u64, __pad2);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
		     offsetof(struct io_uring_buf_ring, tail));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	/* top 8 bits are for internal use */
	BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0);

	io_uring_optable_init();

	/*
	 * Allow user copy in the per-command field, which starts after the
	 * file in io_kiocb and until the opcode field. The openat2 handling
	 * requires copying user memory into the io_kiocb object in that
	 * range, and HARDENED_USERCOPY will complain if we haven't
	 * correctly annotated this range.
	 */
	req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
				SLAB_TYPESAFE_BY_RCU);
	io_buf_cachep = KMEM_CACHE(io_buffer,
				   SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);

	iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);

#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", kernel_io_uring_disabled_table);
#endif

	return 0;
}
__initcall(io_uring_init);