bfq-iosched.c 266 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763277327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532353335343535353635373538353935403541354235433544354535463547354835493550355135523553355435553556355735583559356035613562356335643565356635673568356935703571357235733574357535763577357835793580358135823583358435853586358735883589359035913592359335943595359635973598359936003601360236033604360536063607360836093610361136123613361436153616361736183619362036213622362336243625362636273628362936303631363236333634363536363637363836393640364136423643364436453646364736483649365036513652365336543655365636573658365936603661366236633664366536663667366836693670367136723673367436753676367736783679368036813682368336843685368636873688368936903691369236933694369536963697369836993700370137023703370437053706370737083709371037113712371337143715371637173718371937203721372237233724372537263727372837293730373137323733373437353736373737383739374037413742374337443745374637473748374937503751375237533754375537563757375837593760376137623763376437653766376737683769377037713772377337743775377637773778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764777477847794780478147824783478447854786478747884789479047914792479347944795479647974798479948004801480248034804480548064807480848094810481148124813481448154816481748184819482048214822482348244825482648274828482948304831483248334834483548364837483848394840484148424843484448454846484748484849485048514852485348544855485648574858485948604861486248634864486548664867486848694870487148724873487448754876487748784879488048814882488348844885488648874888488948904891489248934894489548964897489848994900490149024903490449054906490749084909491049114912491349144915491649174918491949204921492249234924492549264927492849294930493149324933493449354936493749384939494049414942494349444945494649474948494949504951495249534954495549564957495849594960496149624963496449654966496749684969497049714972497349744975497649774978497949804981498249834984498549864987498849894990499149924993499449954996499749984999500050015002500350045005500650075008500950105011501250135014501550165017501850195020502150225023502450255026502750285029503050315032503350345035503650375038503950405041504250435044504550465047504850495050505150525053505450555056505750585059506050615062506350645065506650675068506950705071507250735074507550765077507850795080508150825083508450855086508750885089509050915092509350945095509650975098509951005101510251035104510551065107510851095110511151125113511451155116511751185119512051215122512351245125512651275128512951305131513251335134513551365137513851395140514151425143514451455146514751485149515051515152515351545155515651575158515951605161516251635164516551665167516851695170517151725173517451755176517751785179518051815182518351845185518651875188518951905191519251935194519551965197519851995200520152025203520452055206520752085209521052115212521352145215521652175218521952205221522252235224522552265227522852295230523152325233523452355236523752385239524052415242524352445245524652475248524952505251525252535254525552565257525852595260526152625263526452655266526752685269527052715272527352745275527652775278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766277627862796280628162826283628462856286628762886289629062916292629362946295629662976298629963006301630263036304630563066307630863096310631163126313631463156316631763186319632063216322632363246325632663276328632963306331633263336334633563366337633863396340634163426343634463456346634763486349635063516352635363546355635663576358635963606361636263636364636563666367636863696370637163726373637463756376637763786379638063816382638363846385638663876388638963906391639263936394639563966397639863996400640164026403640464056406640764086409641064116412641364146415641664176418641964206421642264236424642564266427642864296430643164326433643464356436643764386439644064416442644364446445644664476448644964506451645264536454645564566457645864596460646164626463646464656466646764686469647064716472647364746475647664776478647964806481648264836484648564866487648864896490649164926493649464956496649764986499650065016502650365046505650665076508650965106511651265136514651565166517651865196520652165226523652465256526652765286529653065316532653365346535653665376538653965406541654265436544654565466547654865496550655165526553655465556556655765586559656065616562656365646565656665676568656965706571657265736574657565766577657865796580658165826583658465856586658765886589659065916592659365946595659665976598659966006601660266036604660566066607660866096610661166126613661466156616661766186619662066216622662366246625662666276628662966306631663266336634663566366637663866396640664166426643664466456646664766486649665066516652665366546655665666576658665966606661666266636664666566666667666866696670667166726673667466756676667766786679668066816682668366846685668666876688668966906691669266936694669566966697669866996700670167026703670467056706670767086709671067116712671367146715671667176718671967206721672267236724672567266727672867296730673167326733673467356736673767386739674067416742674367446745674667476748674967506751675267536754675567566757675867596760676167626763676467656766676767686769677067716772677367746775677667776778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727772787279728072817282728372847285728672877288728972907291729272937294729572967297729872997300730173027303730473057306730773087309731073117312731373147315731673177318731973207321732273237324732573267327732873297330733173327333733473357336733773387339734073417342734373447345734673477348734973507351735273537354735573567357735873597360736173627363736473657366736773687369737073717372737373747375737673777378737973807381738273837384738573867387738873897390739173927393739473957396739773987399740074017402740374047405740674077408740974107411741274137414741574167417741874197420742174227423742474257426742774287429743074317432743374347435743674377438743974407441744274437444744574467447744874497450745174527453745474557456745774587459746074617462746374647465746674677468746974707471747274737474747574767477747874797480748174827483748474857486748774887489749074917492749374947495749674977498749975007501750275037504750575067507750875097510751175127513751475157516751775187519752075217522752375247525752675277528752975307531753275337534753575367537753875397540754175427543754475457546754775487549755075517552755375547555755675577558755975607561756275637564756575667567756875697570757175727573757475757576757775787579758075817582758375847585758675877588758975907591759275937594759575967597759875997600760176027603760476057606760776087609761076117612761376147615761676177618761976207621762276237624762576267627762876297630763176327633763476357636763776387639764076417642764376447645764676477648764976507651765276537654765576567657765876597660766176627663766476657666766776687669767076717672767376747675767676777678767976807681768276837684768576867687768876897690769176927693769476957696769776987699770077017702770377047705770677077708770977107711771277137714771577167717771877197720772177227723772477257726772777287729
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Budget Fair Queueing (BFQ) I/O scheduler.
  4. *
  5. * Based on ideas and code from CFQ:
  6. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  7. *
  8. * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  9. * Paolo Valente <paolo.valente@unimore.it>
  10. *
  11. * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
  12. * Arianna Avanzini <avanzini@google.com>
  13. *
  14. * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
  15. *
  16. * BFQ is a proportional-share I/O scheduler, with some extra
  17. * low-latency capabilities. BFQ also supports full hierarchical
  18. * scheduling through cgroups. Next paragraphs provide an introduction
  19. * on BFQ inner workings. Details on BFQ benefits, usage and
  20. * limitations can be found in Documentation/block/bfq-iosched.rst.
  21. *
  22. * BFQ is a proportional-share storage-I/O scheduling algorithm based
  23. * on the slice-by-slice service scheme of CFQ. But BFQ assigns
  24. * budgets, measured in number of sectors, to processes instead of
  25. * time slices. The device is not granted to the in-service process
  26. * for a given time slice, but until it has exhausted its assigned
  27. * budget. This change from the time to the service domain enables BFQ
  28. * to distribute the device throughput among processes as desired,
  29. * without any distortion due to throughput fluctuations, or to device
  30. * internal queueing. BFQ uses an ad hoc internal scheduler, called
  31. * B-WF2Q+, to schedule processes according to their budgets. More
  32. * precisely, BFQ schedules queues associated with processes. Each
  33. * process/queue is assigned a user-configurable weight, and B-WF2Q+
  34. * guarantees that each queue receives a fraction of the throughput
  35. * proportional to its weight. Thanks to the accurate policy of
  36. * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
  37. * processes issuing sequential requests (to boost the throughput),
  38. * and yet guarantee a low latency to interactive and soft real-time
  39. * applications.
  40. *
  41. * In particular, to provide these low-latency guarantees, BFQ
  42. * explicitly privileges the I/O of two classes of time-sensitive
  43. * applications: interactive and soft real-time. In more detail, BFQ
  44. * behaves this way if the low_latency parameter is set (default
  45. * configuration). This feature enables BFQ to provide applications in
  46. * these classes with a very low latency.
  47. *
  48. * To implement this feature, BFQ constantly tries to detect whether
  49. * the I/O requests in a bfq_queue come from an interactive or a soft
  50. * real-time application. For brevity, in these cases, the queue is
  51. * said to be interactive or soft real-time. In both cases, BFQ
  52. * privileges the service of the queue, over that of non-interactive
  53. * and non-soft-real-time queues. This privileging is performed,
  54. * mainly, by raising the weight of the queue. So, for brevity, we
  55. * call just weight-raising periods the time periods during which a
  56. * queue is privileged, because deemed interactive or soft real-time.
  57. *
  58. * The detection of soft real-time queues/applications is described in
  59. * detail in the comments on the function
  60. * bfq_bfqq_softrt_next_start. On the other hand, the detection of an
  61. * interactive queue works as follows: a queue is deemed interactive
  62. * if it is constantly non empty only for a limited time interval,
  63. * after which it does become empty. The queue may be deemed
  64. * interactive again (for a limited time), if it restarts being
  65. * constantly non empty, provided that this happens only after the
  66. * queue has remained empty for a given minimum idle time.
  67. *
  68. * By default, BFQ computes automatically the above maximum time
  69. * interval, i.e., the time interval after which a constantly
  70. * non-empty queue stops being deemed interactive. Since a queue is
  71. * weight-raised while it is deemed interactive, this maximum time
  72. * interval happens to coincide with the (maximum) duration of the
  73. * weight-raising for interactive queues.
  74. *
  75. * Finally, BFQ also features additional heuristics for
  76. * preserving both a low latency and a high throughput on NCQ-capable,
  77. * rotational or flash-based devices, and to get the job done quickly
  78. * for applications consisting in many I/O-bound processes.
  79. *
  80. * NOTE: if the main or only goal, with a given device, is to achieve
  81. * the maximum-possible throughput at all times, then do switch off
  82. * all low-latency heuristics for that device, by setting low_latency
  83. * to 0.
  84. *
  85. * BFQ is described in [1], where also a reference to the initial,
  86. * more theoretical paper on BFQ can be found. The interested reader
  87. * can find in the latter paper full details on the main algorithm, as
  88. * well as formulas of the guarantees and formal proofs of all the
  89. * properties. With respect to the version of BFQ presented in these
  90. * papers, this implementation adds a few more heuristics, such as the
  91. * ones that guarantee a low latency to interactive and soft real-time
  92. * applications, and a hierarchical extension based on H-WF2Q+.
  93. *
  94. * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
  95. * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
  96. * with O(log N) complexity derives from the one introduced with EEVDF
  97. * in [3].
  98. *
  99. * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
  100. * Scheduler", Proceedings of the First Workshop on Mobile System
  101. * Technologies (MST-2015), May 2015.
  102. * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
  103. *
  104. * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
  105. * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
  106. * Oct 1997.
  107. *
  108. * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
  109. *
  110. * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
  111. * First: A Flexible and Accurate Mechanism for Proportional Share
  112. * Resource Allocation", technical report.
  113. *
  114. * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
  115. */
  116. #include <linux/module.h>
  117. #include <linux/slab.h>
  118. #include <linux/blkdev.h>
  119. #include <linux/cgroup.h>
  120. #include <linux/ktime.h>
  121. #include <linux/rbtree.h>
  122. #include <linux/ioprio.h>
  123. #include <linux/sbitmap.h>
  124. #include <linux/delay.h>
  125. #include <linux/backing-dev.h>
  126. #include <trace/events/block.h>
  127. #include "elevator.h"
  128. #include "blk.h"
  129. #include "blk-mq.h"
  130. #include "blk-mq-sched.h"
  131. #include "bfq-iosched.h"
  132. #include "blk-wbt.h"
  133. #define BFQ_BFQQ_FNS(name) \
  134. void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
  135. { \
  136. __set_bit(BFQQF_##name, &(bfqq)->flags); \
  137. } \
  138. void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
  139. { \
  140. __clear_bit(BFQQF_##name, &(bfqq)->flags); \
  141. } \
  142. int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
  143. { \
  144. return test_bit(BFQQF_##name, &(bfqq)->flags); \
  145. }
  146. BFQ_BFQQ_FNS(just_created);
  147. BFQ_BFQQ_FNS(busy);
  148. BFQ_BFQQ_FNS(wait_request);
  149. BFQ_BFQQ_FNS(non_blocking_wait_rq);
  150. BFQ_BFQQ_FNS(fifo_expire);
  151. BFQ_BFQQ_FNS(has_short_ttime);
  152. BFQ_BFQQ_FNS(sync);
  153. BFQ_BFQQ_FNS(IO_bound);
  154. BFQ_BFQQ_FNS(in_large_burst);
  155. BFQ_BFQQ_FNS(coop);
  156. BFQ_BFQQ_FNS(split_coop);
  157. BFQ_BFQQ_FNS(softrt_update);
  158. #undef BFQ_BFQQ_FNS \
  159. /* Expiration time of async (0) and sync (1) requests, in ns. */
  160. static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
  161. /* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
  162. static const int bfq_back_max = 16 * 1024;
  163. /* Penalty of a backwards seek, in number of sectors. */
  164. static const int bfq_back_penalty = 2;
  165. /* Idling period duration, in ns. */
  166. static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
  167. /* Minimum number of assigned budgets for which stats are safe to compute. */
  168. static const int bfq_stats_min_budgets = 194;
  169. /* Default maximum budget values, in sectors and number of requests. */
  170. static const int bfq_default_max_budget = 16 * 1024;
  171. /*
  172. * When a sync request is dispatched, the queue that contains that
  173. * request, and all the ancestor entities of that queue, are charged
  174. * with the number of sectors of the request. In contrast, if the
  175. * request is async, then the queue and its ancestor entities are
  176. * charged with the number of sectors of the request, multiplied by
  177. * the factor below. This throttles the bandwidth for async I/O,
  178. * w.r.t. to sync I/O, and it is done to counter the tendency of async
  179. * writes to steal I/O throughput to reads.
  180. *
  181. * The current value of this parameter is the result of a tuning with
  182. * several hardware and software configurations. We tried to find the
  183. * lowest value for which writes do not cause noticeable problems to
  184. * reads. In fact, the lower this parameter, the stabler I/O control,
  185. * in the following respect. The lower this parameter is, the less
  186. * the bandwidth enjoyed by a group decreases
  187. * - when the group does writes, w.r.t. to when it does reads;
  188. * - when other groups do reads, w.r.t. to when they do writes.
  189. */
  190. static const int bfq_async_charge_factor = 3;
  191. /* Default timeout values, in jiffies, approximating CFQ defaults. */
  192. const int bfq_timeout = HZ / 8;
  193. /*
  194. * Time limit for merging (see comments in bfq_setup_cooperator). Set
  195. * to the slowest value that, in our tests, proved to be effective in
  196. * removing false positives, while not causing true positives to miss
  197. * queue merging.
  198. *
  199. * As can be deduced from the low time limit below, queue merging, if
  200. * successful, happens at the very beginning of the I/O of the involved
  201. * cooperating processes, as a consequence of the arrival of the very
  202. * first requests from each cooperator. After that, there is very
  203. * little chance to find cooperators.
  204. */
  205. static const unsigned long bfq_merge_time_limit = HZ/10;
  206. static struct kmem_cache *bfq_pool;
  207. /* Below this threshold (in ns), we consider thinktime immediate. */
  208. #define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
  209. /* hw_tag detection: parallel requests threshold and min samples needed. */
  210. #define BFQ_HW_QUEUE_THRESHOLD 3
  211. #define BFQ_HW_QUEUE_SAMPLES 32
  212. #define BFQQ_SEEK_THR (sector_t)(8 * 100)
  213. #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
  214. #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
  215. (get_sdist(last_pos, rq) > \
  216. BFQQ_SEEK_THR && \
  217. (!blk_queue_nonrot(bfqd->queue) || \
  218. blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
  219. #define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
  220. #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
  221. /*
  222. * Sync random I/O is likely to be confused with soft real-time I/O,
  223. * because it is characterized by limited throughput and apparently
  224. * isochronous arrival pattern. To avoid false positives, queues
  225. * containing only random (seeky) I/O are prevented from being tagged
  226. * as soft real-time.
  227. */
  228. #define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
  229. /* Min number of samples required to perform peak-rate update */
  230. #define BFQ_RATE_MIN_SAMPLES 32
  231. /* Min observation time interval required to perform a peak-rate update (ns) */
  232. #define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
  233. /* Target observation time interval for a peak-rate update (ns) */
  234. #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
  235. /*
  236. * Shift used for peak-rate fixed precision calculations.
  237. * With
  238. * - the current shift: 16 positions
  239. * - the current type used to store rate: u32
  240. * - the current unit of measure for rate: [sectors/usec], or, more precisely,
  241. * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
  242. * the range of rates that can be stored is
  243. * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
  244. * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
  245. * [15, 65G] sectors/sec
  246. * Which, assuming a sector size of 512B, corresponds to a range of
  247. * [7.5K, 33T] B/sec
  248. */
  249. #define BFQ_RATE_SHIFT 16
  250. /*
  251. * When configured for computing the duration of the weight-raising
  252. * for interactive queues automatically (see the comments at the
  253. * beginning of this file), BFQ does it using the following formula:
  254. * duration = (ref_rate / r) * ref_wr_duration,
  255. * where r is the peak rate of the device, and ref_rate and
  256. * ref_wr_duration are two reference parameters. In particular,
  257. * ref_rate is the peak rate of the reference storage device (see
  258. * below), and ref_wr_duration is about the maximum time needed, with
  259. * BFQ and while reading two files in parallel, to load typical large
  260. * applications on the reference device (see the comments on
  261. * max_service_from_wr below, for more details on how ref_wr_duration
  262. * is obtained). In practice, the slower/faster the device at hand
  263. * is, the more/less it takes to load applications with respect to the
  264. * reference device. Accordingly, the longer/shorter BFQ grants
  265. * weight raising to interactive applications.
  266. *
  267. * BFQ uses two different reference pairs (ref_rate, ref_wr_duration),
  268. * depending on whether the device is rotational or non-rotational.
  269. *
  270. * In the following definitions, ref_rate[0] and ref_wr_duration[0]
  271. * are the reference values for a rotational device, whereas
  272. * ref_rate[1] and ref_wr_duration[1] are the reference values for a
  273. * non-rotational device. The reference rates are not the actual peak
  274. * rates of the devices used as a reference, but slightly lower
  275. * values. The reason for using slightly lower values is that the
  276. * peak-rate estimator tends to yield slightly lower values than the
  277. * actual peak rate (it can yield the actual peak rate only if there
  278. * is only one process doing I/O, and the process does sequential
  279. * I/O).
  280. *
  281. * The reference peak rates are measured in sectors/usec, left-shifted
  282. * by BFQ_RATE_SHIFT.
  283. */
  284. static int ref_rate[2] = {14000, 33000};
  285. /*
  286. * To improve readability, a conversion function is used to initialize
  287. * the following array, which entails that the array can be
  288. * initialized only in a function.
  289. */
  290. static int ref_wr_duration[2];
  291. /*
  292. * BFQ uses the above-detailed, time-based weight-raising mechanism to
  293. * privilege interactive tasks. This mechanism is vulnerable to the
  294. * following false positives: I/O-bound applications that will go on
  295. * doing I/O for much longer than the duration of weight
  296. * raising. These applications have basically no benefit from being
  297. * weight-raised at the beginning of their I/O. On the opposite end,
  298. * while being weight-raised, these applications
  299. * a) unjustly steal throughput to applications that may actually need
  300. * low latency;
  301. * b) make BFQ uselessly perform device idling; device idling results
  302. * in loss of device throughput with most flash-based storage, and may
  303. * increase latencies when used purposelessly.
  304. *
  305. * BFQ tries to reduce these problems, by adopting the following
  306. * countermeasure. To introduce this countermeasure, we need first to
  307. * finish explaining how the duration of weight-raising for
  308. * interactive tasks is computed.
  309. *
  310. * For a bfq_queue deemed as interactive, the duration of weight
  311. * raising is dynamically adjusted, as a function of the estimated
  312. * peak rate of the device, so as to be equal to the time needed to
  313. * execute the 'largest' interactive task we benchmarked so far. By
  314. * largest task, we mean the task for which each involved process has
  315. * to do more I/O than for any of the other tasks we benchmarked. This
  316. * reference interactive task is the start-up of LibreOffice Writer,
  317. * and in this task each process/bfq_queue needs to have at most ~110K
  318. * sectors transferred.
  319. *
  320. * This last piece of information enables BFQ to reduce the actual
  321. * duration of weight-raising for at least one class of I/O-bound
  322. * applications: those doing sequential or quasi-sequential I/O. An
  323. * example is file copy. In fact, once started, the main I/O-bound
  324. * processes of these applications usually consume the above 110K
  325. * sectors in much less time than the processes of an application that
  326. * is starting, because these I/O-bound processes will greedily devote
  327. * almost all their CPU cycles only to their target,
  328. * throughput-friendly I/O operations. This is even more true if BFQ
  329. * happens to be underestimating the device peak rate, and thus
  330. * overestimating the duration of weight raising. But, according to
  331. * our measurements, once transferred 110K sectors, these processes
  332. * have no right to be weight-raised any longer.
  333. *
  334. * Basing on the last consideration, BFQ ends weight-raising for a
  335. * bfq_queue if the latter happens to have received an amount of
  336. * service at least equal to the following constant. The constant is
  337. * set to slightly more than 110K, to have a minimum safety margin.
  338. *
  339. * This early ending of weight-raising reduces the amount of time
  340. * during which interactive false positives cause the two problems
  341. * described at the beginning of these comments.
  342. */
  343. static const unsigned long max_service_from_wr = 120000;
  344. /*
  345. * Maximum time between the creation of two queues, for stable merge
  346. * to be activated (in ms)
  347. */
  348. static const unsigned long bfq_activation_stable_merging = 600;
  349. /*
  350. * Minimum time to be waited before evaluating delayed stable merge (in ms)
  351. */
  352. static const unsigned long bfq_late_stable_merging = 600;
  353. #define RQ_BIC(rq) ((struct bfq_io_cq *)((rq)->elv.priv[0]))
  354. #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
  355. struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync,
  356. unsigned int actuator_idx)
  357. {
  358. if (is_sync)
  359. return bic->bfqq[1][actuator_idx];
  360. return bic->bfqq[0][actuator_idx];
  361. }
  362. static void bfq_put_stable_ref(struct bfq_queue *bfqq);
  363. void bic_set_bfqq(struct bfq_io_cq *bic,
  364. struct bfq_queue *bfqq,
  365. bool is_sync,
  366. unsigned int actuator_idx)
  367. {
  368. struct bfq_queue *old_bfqq = bic->bfqq[is_sync][actuator_idx];
  369. /*
  370. * If bfqq != NULL, then a non-stable queue merge between
  371. * bic->bfqq and bfqq is happening here. This causes troubles
  372. * in the following case: bic->bfqq has also been scheduled
  373. * for a possible stable merge with bic->stable_merge_bfqq,
  374. * and bic->stable_merge_bfqq == bfqq happens to
  375. * hold. Troubles occur because bfqq may then undergo a split,
  376. * thereby becoming eligible for a stable merge. Yet, if
  377. * bic->stable_merge_bfqq points exactly to bfqq, then bfqq
  378. * would be stably merged with itself. To avoid this anomaly,
  379. * we cancel the stable merge if
  380. * bic->stable_merge_bfqq == bfqq.
  381. */
  382. struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[actuator_idx];
  383. /* Clear bic pointer if bfqq is detached from this bic */
  384. if (old_bfqq && old_bfqq->bic == bic)
  385. old_bfqq->bic = NULL;
  386. if (is_sync)
  387. bic->bfqq[1][actuator_idx] = bfqq;
  388. else
  389. bic->bfqq[0][actuator_idx] = bfqq;
  390. if (bfqq && bfqq_data->stable_merge_bfqq == bfqq) {
  391. /*
  392. * Actually, these same instructions are executed also
  393. * in bfq_setup_cooperator, in case of abort or actual
  394. * execution of a stable merge. We could avoid
  395. * repeating these instructions there too, but if we
  396. * did so, we would nest even more complexity in this
  397. * function.
  398. */
  399. bfq_put_stable_ref(bfqq_data->stable_merge_bfqq);
  400. bfqq_data->stable_merge_bfqq = NULL;
  401. }
  402. }
  403. struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
  404. {
  405. return bic->icq.q->elevator->elevator_data;
  406. }
  407. /**
  408. * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
  409. * @icq: the iocontext queue.
  410. */
  411. static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
  412. {
  413. /* bic->icq is the first member, %NULL will convert to %NULL */
  414. return container_of(icq, struct bfq_io_cq, icq);
  415. }
  416. /**
  417. * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
  418. * @q: the request queue.
  419. */
  420. static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
  421. {
  422. struct bfq_io_cq *icq;
  423. unsigned long flags;
  424. if (!current->io_context)
  425. return NULL;
  426. spin_lock_irqsave(&q->queue_lock, flags);
  427. icq = icq_to_bic(ioc_lookup_icq(q));
  428. spin_unlock_irqrestore(&q->queue_lock, flags);
  429. return icq;
  430. }
  431. /*
  432. * Scheduler run of queue, if there are requests pending and no one in the
  433. * driver that will restart queueing.
  434. */
  435. void bfq_schedule_dispatch(struct bfq_data *bfqd)
  436. {
  437. lockdep_assert_held(&bfqd->lock);
  438. if (bfqd->queued != 0) {
  439. bfq_log(bfqd, "schedule dispatch");
  440. blk_mq_run_hw_queues(bfqd->queue, true);
  441. }
  442. }
  443. #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  444. #define bfq_sample_valid(samples) ((samples) > 80)
  445. /*
  446. * Lifted from AS - choose which of rq1 and rq2 that is best served now.
  447. * We choose the request that is closer to the head right now. Distance
  448. * behind the head is penalized and only allowed to a certain extent.
  449. */
  450. static struct request *bfq_choose_req(struct bfq_data *bfqd,
  451. struct request *rq1,
  452. struct request *rq2,
  453. sector_t last)
  454. {
  455. sector_t s1, s2, d1 = 0, d2 = 0;
  456. unsigned long back_max;
  457. #define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
  458. #define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
  459. unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
  460. if (!rq1 || rq1 == rq2)
  461. return rq2;
  462. if (!rq2)
  463. return rq1;
  464. if (rq_is_sync(rq1) && !rq_is_sync(rq2))
  465. return rq1;
  466. else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
  467. return rq2;
  468. if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
  469. return rq1;
  470. else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
  471. return rq2;
  472. s1 = blk_rq_pos(rq1);
  473. s2 = blk_rq_pos(rq2);
  474. /*
  475. * By definition, 1KiB is 2 sectors.
  476. */
  477. back_max = bfqd->bfq_back_max * 2;
  478. /*
  479. * Strict one way elevator _except_ in the case where we allow
  480. * short backward seeks which are biased as twice the cost of a
  481. * similar forward seek.
  482. */
  483. if (s1 >= last)
  484. d1 = s1 - last;
  485. else if (s1 + back_max >= last)
  486. d1 = (last - s1) * bfqd->bfq_back_penalty;
  487. else
  488. wrap |= BFQ_RQ1_WRAP;
  489. if (s2 >= last)
  490. d2 = s2 - last;
  491. else if (s2 + back_max >= last)
  492. d2 = (last - s2) * bfqd->bfq_back_penalty;
  493. else
  494. wrap |= BFQ_RQ2_WRAP;
  495. /* Found required data */
  496. /*
  497. * By doing switch() on the bit mask "wrap" we avoid having to
  498. * check two variables for all permutations: --> faster!
  499. */
  500. switch (wrap) {
  501. case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
  502. if (d1 < d2)
  503. return rq1;
  504. else if (d2 < d1)
  505. return rq2;
  506. if (s1 >= s2)
  507. return rq1;
  508. else
  509. return rq2;
  510. case BFQ_RQ2_WRAP:
  511. return rq1;
  512. case BFQ_RQ1_WRAP:
  513. return rq2;
  514. case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
  515. default:
  516. /*
  517. * Since both rqs are wrapped,
  518. * start with the one that's further behind head
  519. * (--> only *one* back seek required),
  520. * since back seek takes more time than forward.
  521. */
  522. if (s1 <= s2)
  523. return rq1;
  524. else
  525. return rq2;
  526. }
  527. }
  528. #define BFQ_LIMIT_INLINE_DEPTH 16
  529. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  530. static bool bfqq_request_over_limit(struct bfq_data *bfqd,
  531. struct bfq_io_cq *bic, blk_opf_t opf,
  532. unsigned int act_idx, int limit)
  533. {
  534. struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
  535. struct bfq_entity **entities = inline_entities;
  536. int alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
  537. struct bfq_sched_data *sched_data;
  538. struct bfq_entity *entity;
  539. struct bfq_queue *bfqq;
  540. unsigned long wsum;
  541. bool ret = false;
  542. int depth;
  543. int level;
  544. retry:
  545. spin_lock_irq(&bfqd->lock);
  546. bfqq = bic_to_bfqq(bic, op_is_sync(opf), act_idx);
  547. if (!bfqq)
  548. goto out;
  549. entity = &bfqq->entity;
  550. if (!entity->on_st_or_in_serv)
  551. goto out;
  552. /* +1 for bfqq entity, root cgroup not included */
  553. depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
  554. if (depth > alloc_depth) {
  555. spin_unlock_irq(&bfqd->lock);
  556. if (entities != inline_entities)
  557. kfree(entities);
  558. entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
  559. if (!entities)
  560. return false;
  561. alloc_depth = depth;
  562. goto retry;
  563. }
  564. sched_data = entity->sched_data;
  565. /* Gather our ancestors as we need to traverse them in reverse order */
  566. level = 0;
  567. for_each_entity(entity) {
  568. /*
  569. * If at some level entity is not even active, allow request
  570. * queueing so that BFQ knows there's work to do and activate
  571. * entities.
  572. */
  573. if (!entity->on_st_or_in_serv)
  574. goto out;
  575. /* Uh, more parents than cgroup subsystem thinks? */
  576. if (WARN_ON_ONCE(level >= depth))
  577. break;
  578. entities[level++] = entity;
  579. }
  580. WARN_ON_ONCE(level != depth);
  581. for (level--; level >= 0; level--) {
  582. entity = entities[level];
  583. if (level > 0) {
  584. wsum = bfq_entity_service_tree(entity)->wsum;
  585. } else {
  586. int i;
  587. /*
  588. * For bfqq itself we take into account service trees
  589. * of all higher priority classes and multiply their
  590. * weights so that low prio queue from higher class
  591. * gets more requests than high prio queue from lower
  592. * class.
  593. */
  594. wsum = 0;
  595. for (i = 0; i <= bfqq->ioprio_class - 1; i++) {
  596. wsum = wsum * IOPRIO_BE_NR +
  597. sched_data->service_tree[i].wsum;
  598. }
  599. }
  600. if (!wsum)
  601. continue;
  602. limit = DIV_ROUND_CLOSEST(limit * entity->weight, wsum);
  603. if (entity->allocated >= limit) {
  604. bfq_log_bfqq(bfqq->bfqd, bfqq,
  605. "too many requests: allocated %d limit %d level %d",
  606. entity->allocated, limit, level);
  607. ret = true;
  608. break;
  609. }
  610. }
  611. out:
  612. spin_unlock_irq(&bfqd->lock);
  613. if (entities != inline_entities)
  614. kfree(entities);
  615. return ret;
  616. }
  617. #else
  618. static bool bfqq_request_over_limit(struct bfq_data *bfqd,
  619. struct bfq_io_cq *bic, blk_opf_t opf,
  620. unsigned int act_idx, int limit)
  621. {
  622. return false;
  623. }
  624. #endif
  625. /*
  626. * Async I/O can easily starve sync I/O (both sync reads and sync
  627. * writes), by consuming all tags. Similarly, storms of sync writes,
  628. * such as those that sync(2) may trigger, can starve sync reads.
  629. * Limit depths of async I/O and sync writes so as to counter both
  630. * problems.
  631. *
  632. * Also if a bfq queue or its parent cgroup consume more tags than would be
  633. * appropriate for their weight, we trim the available tag depth to 1. This
  634. * avoids a situation where one cgroup can starve another cgroup from tags and
  635. * thus block service differentiation among cgroups. Note that because the
  636. * queue / cgroup already has many requests allocated and queued, this does not
  637. * significantly affect service guarantees coming from the BFQ scheduling
  638. * algorithm.
  639. */
  640. static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
  641. {
  642. struct bfq_data *bfqd = data->q->elevator->elevator_data;
  643. struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
  644. int depth;
  645. unsigned limit = data->q->nr_requests;
  646. unsigned int act_idx;
  647. /* Sync reads have full depth available */
  648. if (op_is_sync(opf) && !op_is_write(opf)) {
  649. depth = 0;
  650. } else {
  651. depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
  652. limit = (limit * depth) >> bfqd->full_depth_shift;
  653. }
  654. for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
  655. /* Fast path to check if bfqq is already allocated. */
  656. if (!bic_to_bfqq(bic, op_is_sync(opf), act_idx))
  657. continue;
  658. /*
  659. * Does queue (or any parent entity) exceed number of
  660. * requests that should be available to it? Heavily
  661. * limit depth so that it cannot consume more
  662. * available requests and thus starve other entities.
  663. */
  664. if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) {
  665. depth = 1;
  666. break;
  667. }
  668. }
  669. bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
  670. __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
  671. if (depth)
  672. data->shallow_depth = depth;
  673. }
  674. static struct bfq_queue *
  675. bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
  676. sector_t sector, struct rb_node **ret_parent,
  677. struct rb_node ***rb_link)
  678. {
  679. struct rb_node **p, *parent;
  680. struct bfq_queue *bfqq = NULL;
  681. parent = NULL;
  682. p = &root->rb_node;
  683. while (*p) {
  684. struct rb_node **n;
  685. parent = *p;
  686. bfqq = rb_entry(parent, struct bfq_queue, pos_node);
  687. /*
  688. * Sort strictly based on sector. Smallest to the left,
  689. * largest to the right.
  690. */
  691. if (sector > blk_rq_pos(bfqq->next_rq))
  692. n = &(*p)->rb_right;
  693. else if (sector < blk_rq_pos(bfqq->next_rq))
  694. n = &(*p)->rb_left;
  695. else
  696. break;
  697. p = n;
  698. bfqq = NULL;
  699. }
  700. *ret_parent = parent;
  701. if (rb_link)
  702. *rb_link = p;
  703. bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
  704. (unsigned long long)sector,
  705. bfqq ? bfqq->pid : 0);
  706. return bfqq;
  707. }
  708. static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
  709. {
  710. return bfqq->service_from_backlogged > 0 &&
  711. time_is_before_jiffies(bfqq->first_IO_time +
  712. bfq_merge_time_limit);
  713. }
  714. /*
  715. * The following function is not marked as __cold because it is
  716. * actually cold, but for the same performance goal described in the
  717. * comments on the likely() at the beginning of
  718. * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
  719. * execution time for the case where this function is not invoked, we
  720. * had to add an unlikely() in each involved if().
  721. */
  722. void __cold
  723. bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  724. {
  725. struct rb_node **p, *parent;
  726. struct bfq_queue *__bfqq;
  727. if (bfqq->pos_root) {
  728. rb_erase(&bfqq->pos_node, bfqq->pos_root);
  729. bfqq->pos_root = NULL;
  730. }
  731. /* oom_bfqq does not participate in queue merging */
  732. if (bfqq == &bfqd->oom_bfqq)
  733. return;
  734. /*
  735. * bfqq cannot be merged any longer (see comments in
  736. * bfq_setup_cooperator): no point in adding bfqq into the
  737. * position tree.
  738. */
  739. if (bfq_too_late_for_merging(bfqq))
  740. return;
  741. if (bfq_class_idle(bfqq))
  742. return;
  743. if (!bfqq->next_rq)
  744. return;
  745. bfqq->pos_root = &bfqq_group(bfqq)->rq_pos_tree;
  746. __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
  747. blk_rq_pos(bfqq->next_rq), &parent, &p);
  748. if (!__bfqq) {
  749. rb_link_node(&bfqq->pos_node, parent, p);
  750. rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
  751. } else
  752. bfqq->pos_root = NULL;
  753. }
  754. /*
  755. * The following function returns false either if every active queue
  756. * must receive the same share of the throughput (symmetric scenario),
  757. * or, as a special case, if bfqq must receive a share of the
  758. * throughput lower than or equal to the share that every other active
  759. * queue must receive. If bfqq does sync I/O, then these are the only
  760. * two cases where bfqq happens to be guaranteed its share of the
  761. * throughput even if I/O dispatching is not plugged when bfqq remains
  762. * temporarily empty (for more details, see the comments in the
  763. * function bfq_better_to_idle()). For this reason, the return value
  764. * of this function is used to check whether I/O-dispatch plugging can
  765. * be avoided.
  766. *
  767. * The above first case (symmetric scenario) occurs when:
  768. * 1) all active queues have the same weight,
  769. * 2) all active queues belong to the same I/O-priority class,
  770. * 3) all active groups at the same level in the groups tree have the same
  771. * weight,
  772. * 4) all active groups at the same level in the groups tree have the same
  773. * number of children.
  774. *
  775. * Unfortunately, keeping the necessary state for evaluating exactly
  776. * the last two symmetry sub-conditions above would be quite complex
  777. * and time consuming. Therefore this function evaluates, instead,
  778. * only the following stronger three sub-conditions, for which it is
  779. * much easier to maintain the needed state:
  780. * 1) all active queues have the same weight,
  781. * 2) all active queues belong to the same I/O-priority class,
  782. * 3) there is at most one active group.
  783. * In particular, the last condition is always true if hierarchical
  784. * support or the cgroups interface are not enabled, thus no state
  785. * needs to be maintained in this case.
  786. */
  787. static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
  788. struct bfq_queue *bfqq)
  789. {
  790. bool smallest_weight = bfqq &&
  791. bfqq->weight_counter &&
  792. bfqq->weight_counter ==
  793. container_of(
  794. rb_first_cached(&bfqd->queue_weights_tree),
  795. struct bfq_weight_counter,
  796. weights_node);
  797. /*
  798. * For queue weights to differ, queue_weights_tree must contain
  799. * at least two nodes.
  800. */
  801. bool varied_queue_weights = !smallest_weight &&
  802. !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
  803. (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
  804. bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
  805. bool multiple_classes_busy =
  806. (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
  807. (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
  808. (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
  809. return varied_queue_weights || multiple_classes_busy
  810. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  811. || bfqd->num_groups_with_pending_reqs > 1
  812. #endif
  813. ;
  814. }
  815. /*
  816. * If the weight-counter tree passed as input contains no counter for
  817. * the weight of the input queue, then add that counter; otherwise just
  818. * increment the existing counter.
  819. *
  820. * Note that weight-counter trees contain few nodes in mostly symmetric
  821. * scenarios. For example, if all queues have the same weight, then the
  822. * weight-counter tree for the queues may contain at most one node.
  823. * This holds even if low_latency is on, because weight-raised queues
  824. * are not inserted in the tree.
  825. * In most scenarios, the rate at which nodes are created/destroyed
  826. * should be low too.
  827. */
  828. void bfq_weights_tree_add(struct bfq_queue *bfqq)
  829. {
  830. struct rb_root_cached *root = &bfqq->bfqd->queue_weights_tree;
  831. struct bfq_entity *entity = &bfqq->entity;
  832. struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
  833. bool leftmost = true;
  834. /*
  835. * Do not insert if the queue is already associated with a
  836. * counter, which happens if:
  837. * 1) a request arrival has caused the queue to become both
  838. * non-weight-raised, and hence change its weight, and
  839. * backlogged; in this respect, each of the two events
  840. * causes an invocation of this function,
  841. * 2) this is the invocation of this function caused by the
  842. * second event. This second invocation is actually useless,
  843. * and we handle this fact by exiting immediately. More
  844. * efficient or clearer solutions might possibly be adopted.
  845. */
  846. if (bfqq->weight_counter)
  847. return;
  848. while (*new) {
  849. struct bfq_weight_counter *__counter = container_of(*new,
  850. struct bfq_weight_counter,
  851. weights_node);
  852. parent = *new;
  853. if (entity->weight == __counter->weight) {
  854. bfqq->weight_counter = __counter;
  855. goto inc_counter;
  856. }
  857. if (entity->weight < __counter->weight)
  858. new = &((*new)->rb_left);
  859. else {
  860. new = &((*new)->rb_right);
  861. leftmost = false;
  862. }
  863. }
  864. bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
  865. GFP_ATOMIC);
  866. /*
  867. * In the unlucky event of an allocation failure, we just
  868. * exit. This will cause the weight of queue to not be
  869. * considered in bfq_asymmetric_scenario, which, in its turn,
  870. * causes the scenario to be deemed wrongly symmetric in case
  871. * bfqq's weight would have been the only weight making the
  872. * scenario asymmetric. On the bright side, no unbalance will
  873. * however occur when bfqq becomes inactive again (the
  874. * invocation of this function is triggered by an activation
  875. * of queue). In fact, bfq_weights_tree_remove does nothing
  876. * if !bfqq->weight_counter.
  877. */
  878. if (unlikely(!bfqq->weight_counter))
  879. return;
  880. bfqq->weight_counter->weight = entity->weight;
  881. rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
  882. rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
  883. leftmost);
  884. inc_counter:
  885. bfqq->weight_counter->num_active++;
  886. bfqq->ref++;
  887. }
  888. /*
  889. * Decrement the weight counter associated with the queue, and, if the
  890. * counter reaches 0, remove the counter from the tree.
  891. * See the comments to the function bfq_weights_tree_add() for considerations
  892. * about overhead.
  893. */
  894. void bfq_weights_tree_remove(struct bfq_queue *bfqq)
  895. {
  896. struct rb_root_cached *root;
  897. if (!bfqq->weight_counter)
  898. return;
  899. root = &bfqq->bfqd->queue_weights_tree;
  900. bfqq->weight_counter->num_active--;
  901. if (bfqq->weight_counter->num_active > 0)
  902. goto reset_entity_pointer;
  903. rb_erase_cached(&bfqq->weight_counter->weights_node, root);
  904. kfree(bfqq->weight_counter);
  905. reset_entity_pointer:
  906. bfqq->weight_counter = NULL;
  907. bfq_put_queue(bfqq);
  908. }
  909. /*
  910. * Return expired entry, or NULL to just start from scratch in rbtree.
  911. */
  912. static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
  913. struct request *last)
  914. {
  915. struct request *rq;
  916. if (bfq_bfqq_fifo_expire(bfqq))
  917. return NULL;
  918. bfq_mark_bfqq_fifo_expire(bfqq);
  919. rq = rq_entry_fifo(bfqq->fifo.next);
  920. if (rq == last || blk_time_get_ns() < rq->fifo_time)
  921. return NULL;
  922. bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
  923. return rq;
  924. }
  925. static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
  926. struct bfq_queue *bfqq,
  927. struct request *last)
  928. {
  929. struct rb_node *rbnext = rb_next(&last->rb_node);
  930. struct rb_node *rbprev = rb_prev(&last->rb_node);
  931. struct request *next, *prev = NULL;
  932. /* Follow expired path, else get first next available. */
  933. next = bfq_check_fifo(bfqq, last);
  934. if (next)
  935. return next;
  936. if (rbprev)
  937. prev = rb_entry_rq(rbprev);
  938. if (rbnext)
  939. next = rb_entry_rq(rbnext);
  940. else {
  941. rbnext = rb_first(&bfqq->sort_list);
  942. if (rbnext && rbnext != &last->rb_node)
  943. next = rb_entry_rq(rbnext);
  944. }
  945. return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
  946. }
  947. /* see the definition of bfq_async_charge_factor for details */
  948. static unsigned long bfq_serv_to_charge(struct request *rq,
  949. struct bfq_queue *bfqq)
  950. {
  951. if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
  952. bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
  953. return blk_rq_sectors(rq);
  954. return blk_rq_sectors(rq) * bfq_async_charge_factor;
  955. }
  956. /**
  957. * bfq_updated_next_req - update the queue after a new next_rq selection.
  958. * @bfqd: the device data the queue belongs to.
  959. * @bfqq: the queue to update.
  960. *
  961. * If the first request of a queue changes we make sure that the queue
  962. * has enough budget to serve at least its first request (if the
  963. * request has grown). We do this because if the queue has not enough
  964. * budget for its first request, it has to go through two dispatch
  965. * rounds to actually get it dispatched.
  966. */
  967. static void bfq_updated_next_req(struct bfq_data *bfqd,
  968. struct bfq_queue *bfqq)
  969. {
  970. struct bfq_entity *entity = &bfqq->entity;
  971. struct request *next_rq = bfqq->next_rq;
  972. unsigned long new_budget;
  973. if (!next_rq)
  974. return;
  975. if (bfqq == bfqd->in_service_queue)
  976. /*
  977. * In order not to break guarantees, budgets cannot be
  978. * changed after an entity has been selected.
  979. */
  980. return;
  981. new_budget = max_t(unsigned long,
  982. max_t(unsigned long, bfqq->max_budget,
  983. bfq_serv_to_charge(next_rq, bfqq)),
  984. entity->service);
  985. if (entity->budget != new_budget) {
  986. entity->budget = new_budget;
  987. bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
  988. new_budget);
  989. bfq_requeue_bfqq(bfqd, bfqq, false);
  990. }
  991. }
  992. static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
  993. {
  994. u64 dur;
  995. dur = bfqd->rate_dur_prod;
  996. do_div(dur, bfqd->peak_rate);
  997. /*
  998. * Limit duration between 3 and 25 seconds. The upper limit
  999. * has been conservatively set after the following worst case:
  1000. * on a QEMU/KVM virtual machine
  1001. * - running in a slow PC
  1002. * - with a virtual disk stacked on a slow low-end 5400rpm HDD
  1003. * - serving a heavy I/O workload, such as the sequential reading
  1004. * of several files
  1005. * mplayer took 23 seconds to start, if constantly weight-raised.
  1006. *
  1007. * As for higher values than that accommodating the above bad
  1008. * scenario, tests show that higher values would often yield
  1009. * the opposite of the desired result, i.e., would worsen
  1010. * responsiveness by allowing non-interactive applications to
  1011. * preserve weight raising for too long.
  1012. *
  1013. * On the other end, lower values than 3 seconds make it
  1014. * difficult for most interactive tasks to complete their jobs
  1015. * before weight-raising finishes.
  1016. */
  1017. return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000));
  1018. }
  1019. /* switch back from soft real-time to interactive weight raising */
  1020. static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
  1021. struct bfq_data *bfqd)
  1022. {
  1023. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1024. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1025. bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
  1026. }
  1027. static void
  1028. bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
  1029. struct bfq_io_cq *bic, bool bfq_already_existing)
  1030. {
  1031. unsigned int old_wr_coeff = 1;
  1032. bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
  1033. unsigned int a_idx = bfqq->actuator_idx;
  1034. struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
  1035. if (bfqq_data->saved_has_short_ttime)
  1036. bfq_mark_bfqq_has_short_ttime(bfqq);
  1037. else
  1038. bfq_clear_bfqq_has_short_ttime(bfqq);
  1039. if (bfqq_data->saved_IO_bound)
  1040. bfq_mark_bfqq_IO_bound(bfqq);
  1041. else
  1042. bfq_clear_bfqq_IO_bound(bfqq);
  1043. bfqq->last_serv_time_ns = bfqq_data->saved_last_serv_time_ns;
  1044. bfqq->inject_limit = bfqq_data->saved_inject_limit;
  1045. bfqq->decrease_time_jif = bfqq_data->saved_decrease_time_jif;
  1046. bfqq->entity.new_weight = bfqq_data->saved_weight;
  1047. bfqq->ttime = bfqq_data->saved_ttime;
  1048. bfqq->io_start_time = bfqq_data->saved_io_start_time;
  1049. bfqq->tot_idle_time = bfqq_data->saved_tot_idle_time;
  1050. /*
  1051. * Restore weight coefficient only if low_latency is on
  1052. */
  1053. if (bfqd->low_latency) {
  1054. old_wr_coeff = bfqq->wr_coeff;
  1055. bfqq->wr_coeff = bfqq_data->saved_wr_coeff;
  1056. }
  1057. bfqq->service_from_wr = bfqq_data->saved_service_from_wr;
  1058. bfqq->wr_start_at_switch_to_srt =
  1059. bfqq_data->saved_wr_start_at_switch_to_srt;
  1060. bfqq->last_wr_start_finish = bfqq_data->saved_last_wr_start_finish;
  1061. bfqq->wr_cur_max_time = bfqq_data->saved_wr_cur_max_time;
  1062. if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
  1063. time_is_before_jiffies(bfqq->last_wr_start_finish +
  1064. bfqq->wr_cur_max_time))) {
  1065. if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  1066. !bfq_bfqq_in_large_burst(bfqq) &&
  1067. time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
  1068. bfq_wr_duration(bfqd))) {
  1069. switch_back_to_interactive_wr(bfqq, bfqd);
  1070. } else {
  1071. bfqq->wr_coeff = 1;
  1072. bfq_log_bfqq(bfqq->bfqd, bfqq,
  1073. "resume state: switching off wr");
  1074. }
  1075. }
  1076. /* make sure weight will be updated, however we got here */
  1077. bfqq->entity.prio_changed = 1;
  1078. if (likely(!busy))
  1079. return;
  1080. if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
  1081. bfqd->wr_busy_queues++;
  1082. else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
  1083. bfqd->wr_busy_queues--;
  1084. }
  1085. static int bfqq_process_refs(struct bfq_queue *bfqq)
  1086. {
  1087. return bfqq->ref - bfqq->entity.allocated -
  1088. bfqq->entity.on_st_or_in_serv -
  1089. (bfqq->weight_counter != NULL) - bfqq->stable_ref;
  1090. }
  1091. /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
  1092. static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  1093. {
  1094. struct bfq_queue *item;
  1095. struct hlist_node *n;
  1096. hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
  1097. hlist_del_init(&item->burst_list_node);
  1098. /*
  1099. * Start the creation of a new burst list only if there is no
  1100. * active queue. See comments on the conditional invocation of
  1101. * bfq_handle_burst().
  1102. */
  1103. if (bfq_tot_busy_queues(bfqd) == 0) {
  1104. hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
  1105. bfqd->burst_size = 1;
  1106. } else
  1107. bfqd->burst_size = 0;
  1108. bfqd->burst_parent_entity = bfqq->entity.parent;
  1109. }
  1110. /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
  1111. static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  1112. {
  1113. /* Increment burst size to take into account also bfqq */
  1114. bfqd->burst_size++;
  1115. if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
  1116. struct bfq_queue *pos, *bfqq_item;
  1117. struct hlist_node *n;
  1118. /*
  1119. * Enough queues have been activated shortly after each
  1120. * other to consider this burst as large.
  1121. */
  1122. bfqd->large_burst = true;
  1123. /*
  1124. * We can now mark all queues in the burst list as
  1125. * belonging to a large burst.
  1126. */
  1127. hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
  1128. burst_list_node)
  1129. bfq_mark_bfqq_in_large_burst(bfqq_item);
  1130. bfq_mark_bfqq_in_large_burst(bfqq);
  1131. /*
  1132. * From now on, and until the current burst finishes, any
  1133. * new queue being activated shortly after the last queue
  1134. * was inserted in the burst can be immediately marked as
  1135. * belonging to a large burst. So the burst list is not
  1136. * needed any more. Remove it.
  1137. */
  1138. hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
  1139. burst_list_node)
  1140. hlist_del_init(&pos->burst_list_node);
  1141. } else /*
  1142. * Burst not yet large: add bfqq to the burst list. Do
  1143. * not increment the ref counter for bfqq, because bfqq
  1144. * is removed from the burst list before freeing bfqq
  1145. * in put_queue.
  1146. */
  1147. hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
  1148. }
  1149. /*
  1150. * If many queues belonging to the same group happen to be created
  1151. * shortly after each other, then the processes associated with these
  1152. * queues have typically a common goal. In particular, bursts of queue
  1153. * creations are usually caused by services or applications that spawn
  1154. * many parallel threads/processes. Examples are systemd during boot,
  1155. * or git grep. To help these processes get their job done as soon as
  1156. * possible, it is usually better to not grant either weight-raising
  1157. * or device idling to their queues, unless these queues must be
  1158. * protected from the I/O flowing through other active queues.
  1159. *
  1160. * In this comment we describe, firstly, the reasons why this fact
  1161. * holds, and, secondly, the next function, which implements the main
  1162. * steps needed to properly mark these queues so that they can then be
  1163. * treated in a different way.
  1164. *
  1165. * The above services or applications benefit mostly from a high
  1166. * throughput: the quicker the requests of the activated queues are
  1167. * cumulatively served, the sooner the target job of these queues gets
  1168. * completed. As a consequence, weight-raising any of these queues,
  1169. * which also implies idling the device for it, is almost always
  1170. * counterproductive, unless there are other active queues to isolate
  1171. * these new queues from. If there no other active queues, then
  1172. * weight-raising these new queues just lowers throughput in most
  1173. * cases.
  1174. *
  1175. * On the other hand, a burst of queue creations may be caused also by
  1176. * the start of an application that does not consist of a lot of
  1177. * parallel I/O-bound threads. In fact, with a complex application,
  1178. * several short processes may need to be executed to start-up the
  1179. * application. In this respect, to start an application as quickly as
  1180. * possible, the best thing to do is in any case to privilege the I/O
  1181. * related to the application with respect to all other
  1182. * I/O. Therefore, the best strategy to start as quickly as possible
  1183. * an application that causes a burst of queue creations is to
  1184. * weight-raise all the queues created during the burst. This is the
  1185. * exact opposite of the best strategy for the other type of bursts.
  1186. *
  1187. * In the end, to take the best action for each of the two cases, the
  1188. * two types of bursts need to be distinguished. Fortunately, this
  1189. * seems relatively easy, by looking at the sizes of the bursts. In
  1190. * particular, we found a threshold such that only bursts with a
  1191. * larger size than that threshold are apparently caused by
  1192. * services or commands such as systemd or git grep. For brevity,
  1193. * hereafter we call just 'large' these bursts. BFQ *does not*
  1194. * weight-raise queues whose creation occurs in a large burst. In
  1195. * addition, for each of these queues BFQ performs or does not perform
  1196. * idling depending on which choice boosts the throughput more. The
  1197. * exact choice depends on the device and request pattern at
  1198. * hand.
  1199. *
  1200. * Unfortunately, false positives may occur while an interactive task
  1201. * is starting (e.g., an application is being started). The
  1202. * consequence is that the queues associated with the task do not
  1203. * enjoy weight raising as expected. Fortunately these false positives
  1204. * are very rare. They typically occur if some service happens to
  1205. * start doing I/O exactly when the interactive task starts.
  1206. *
  1207. * Turning back to the next function, it is invoked only if there are
  1208. * no active queues (apart from active queues that would belong to the
  1209. * same, possible burst bfqq would belong to), and it implements all
  1210. * the steps needed to detect the occurrence of a large burst and to
  1211. * properly mark all the queues belonging to it (so that they can then
  1212. * be treated in a different way). This goal is achieved by
  1213. * maintaining a "burst list" that holds, temporarily, the queues that
  1214. * belong to the burst in progress. The list is then used to mark
  1215. * these queues as belonging to a large burst if the burst does become
  1216. * large. The main steps are the following.
  1217. *
  1218. * . when the very first queue is created, the queue is inserted into the
  1219. * list (as it could be the first queue in a possible burst)
  1220. *
  1221. * . if the current burst has not yet become large, and a queue Q that does
  1222. * not yet belong to the burst is activated shortly after the last time
  1223. * at which a new queue entered the burst list, then the function appends
  1224. * Q to the burst list
  1225. *
  1226. * . if, as a consequence of the previous step, the burst size reaches
  1227. * the large-burst threshold, then
  1228. *
  1229. * . all the queues in the burst list are marked as belonging to a
  1230. * large burst
  1231. *
  1232. * . the burst list is deleted; in fact, the burst list already served
  1233. * its purpose (keeping temporarily track of the queues in a burst,
  1234. * so as to be able to mark them as belonging to a large burst in the
  1235. * previous sub-step), and now is not needed any more
  1236. *
  1237. * . the device enters a large-burst mode
  1238. *
  1239. * . if a queue Q that does not belong to the burst is created while
  1240. * the device is in large-burst mode and shortly after the last time
  1241. * at which a queue either entered the burst list or was marked as
  1242. * belonging to the current large burst, then Q is immediately marked
  1243. * as belonging to a large burst.
  1244. *
  1245. * . if a queue Q that does not belong to the burst is created a while
  1246. * later, i.e., not shortly after, than the last time at which a queue
  1247. * either entered the burst list or was marked as belonging to the
  1248. * current large burst, then the current burst is deemed as finished and:
  1249. *
  1250. * . the large-burst mode is reset if set
  1251. *
  1252. * . the burst list is emptied
  1253. *
  1254. * . Q is inserted in the burst list, as Q may be the first queue
  1255. * in a possible new burst (then the burst list contains just Q
  1256. * after this step).
  1257. */
  1258. static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  1259. {
  1260. /*
  1261. * If bfqq is already in the burst list or is part of a large
  1262. * burst, or finally has just been split, then there is
  1263. * nothing else to do.
  1264. */
  1265. if (!hlist_unhashed(&bfqq->burst_list_node) ||
  1266. bfq_bfqq_in_large_burst(bfqq) ||
  1267. time_is_after_eq_jiffies(bfqq->split_time +
  1268. msecs_to_jiffies(10)))
  1269. return;
  1270. /*
  1271. * If bfqq's creation happens late enough, or bfqq belongs to
  1272. * a different group than the burst group, then the current
  1273. * burst is finished, and related data structures must be
  1274. * reset.
  1275. *
  1276. * In this respect, consider the special case where bfqq is
  1277. * the very first queue created after BFQ is selected for this
  1278. * device. In this case, last_ins_in_burst and
  1279. * burst_parent_entity are not yet significant when we get
  1280. * here. But it is easy to verify that, whether or not the
  1281. * following condition is true, bfqq will end up being
  1282. * inserted into the burst list. In particular the list will
  1283. * happen to contain only bfqq. And this is exactly what has
  1284. * to happen, as bfqq may be the first queue of the first
  1285. * burst.
  1286. */
  1287. if (time_is_before_jiffies(bfqd->last_ins_in_burst +
  1288. bfqd->bfq_burst_interval) ||
  1289. bfqq->entity.parent != bfqd->burst_parent_entity) {
  1290. bfqd->large_burst = false;
  1291. bfq_reset_burst_list(bfqd, bfqq);
  1292. goto end;
  1293. }
  1294. /*
  1295. * If we get here, then bfqq is being activated shortly after the
  1296. * last queue. So, if the current burst is also large, we can mark
  1297. * bfqq as belonging to this large burst immediately.
  1298. */
  1299. if (bfqd->large_burst) {
  1300. bfq_mark_bfqq_in_large_burst(bfqq);
  1301. goto end;
  1302. }
  1303. /*
  1304. * If we get here, then a large-burst state has not yet been
  1305. * reached, but bfqq is being activated shortly after the last
  1306. * queue. Then we add bfqq to the burst.
  1307. */
  1308. bfq_add_to_burst(bfqd, bfqq);
  1309. end:
  1310. /*
  1311. * At this point, bfqq either has been added to the current
  1312. * burst or has caused the current burst to terminate and a
  1313. * possible new burst to start. In particular, in the second
  1314. * case, bfqq has become the first queue in the possible new
  1315. * burst. In both cases last_ins_in_burst needs to be moved
  1316. * forward.
  1317. */
  1318. bfqd->last_ins_in_burst = jiffies;
  1319. }
  1320. static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
  1321. {
  1322. struct bfq_entity *entity = &bfqq->entity;
  1323. return entity->budget - entity->service;
  1324. }
  1325. /*
  1326. * If enough samples have been computed, return the current max budget
  1327. * stored in bfqd, which is dynamically updated according to the
  1328. * estimated disk peak rate; otherwise return the default max budget
  1329. */
  1330. static int bfq_max_budget(struct bfq_data *bfqd)
  1331. {
  1332. if (bfqd->budgets_assigned < bfq_stats_min_budgets)
  1333. return bfq_default_max_budget;
  1334. else
  1335. return bfqd->bfq_max_budget;
  1336. }
  1337. /*
  1338. * Return min budget, which is a fraction of the current or default
  1339. * max budget (trying with 1/32)
  1340. */
  1341. static int bfq_min_budget(struct bfq_data *bfqd)
  1342. {
  1343. if (bfqd->budgets_assigned < bfq_stats_min_budgets)
  1344. return bfq_default_max_budget / 32;
  1345. else
  1346. return bfqd->bfq_max_budget / 32;
  1347. }
  1348. /*
  1349. * The next function, invoked after the input queue bfqq switches from
  1350. * idle to busy, updates the budget of bfqq. The function also tells
  1351. * whether the in-service queue should be expired, by returning
  1352. * true. The purpose of expiring the in-service queue is to give bfqq
  1353. * the chance to possibly preempt the in-service queue, and the reason
  1354. * for preempting the in-service queue is to achieve one of the two
  1355. * goals below.
  1356. *
  1357. * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
  1358. * expired because it has remained idle. In particular, bfqq may have
  1359. * expired for one of the following two reasons:
  1360. *
  1361. * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
  1362. * and did not make it to issue a new request before its last
  1363. * request was served;
  1364. *
  1365. * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
  1366. * a new request before the expiration of the idling-time.
  1367. *
  1368. * Even if bfqq has expired for one of the above reasons, the process
  1369. * associated with the queue may be however issuing requests greedily,
  1370. * and thus be sensitive to the bandwidth it receives (bfqq may have
  1371. * remained idle for other reasons: CPU high load, bfqq not enjoying
  1372. * idling, I/O throttling somewhere in the path from the process to
  1373. * the I/O scheduler, ...). But if, after every expiration for one of
  1374. * the above two reasons, bfqq has to wait for the service of at least
  1375. * one full budget of another queue before being served again, then
  1376. * bfqq is likely to get a much lower bandwidth or resource time than
  1377. * its reserved ones. To address this issue, two countermeasures need
  1378. * to be taken.
  1379. *
  1380. * First, the budget and the timestamps of bfqq need to be updated in
  1381. * a special way on bfqq reactivation: they need to be updated as if
  1382. * bfqq did not remain idle and did not expire. In fact, if they are
  1383. * computed as if bfqq expired and remained idle until reactivation,
  1384. * then the process associated with bfqq is treated as if, instead of
  1385. * being greedy, it stopped issuing requests when bfqq remained idle,
  1386. * and restarts issuing requests only on this reactivation. In other
  1387. * words, the scheduler does not help the process recover the "service
  1388. * hole" between bfqq expiration and reactivation. As a consequence,
  1389. * the process receives a lower bandwidth than its reserved one. In
  1390. * contrast, to recover this hole, the budget must be updated as if
  1391. * bfqq was not expired at all before this reactivation, i.e., it must
  1392. * be set to the value of the remaining budget when bfqq was
  1393. * expired. Along the same line, timestamps need to be assigned the
  1394. * value they had the last time bfqq was selected for service, i.e.,
  1395. * before last expiration. Thus timestamps need to be back-shifted
  1396. * with respect to their normal computation (see [1] for more details
  1397. * on this tricky aspect).
  1398. *
  1399. * Secondly, to allow the process to recover the hole, the in-service
  1400. * queue must be expired too, to give bfqq the chance to preempt it
  1401. * immediately. In fact, if bfqq has to wait for a full budget of the
  1402. * in-service queue to be completed, then it may become impossible to
  1403. * let the process recover the hole, even if the back-shifted
  1404. * timestamps of bfqq are lower than those of the in-service queue. If
  1405. * this happens for most or all of the holes, then the process may not
  1406. * receive its reserved bandwidth. In this respect, it is worth noting
  1407. * that, being the service of outstanding requests unpreemptible, a
  1408. * little fraction of the holes may however be unrecoverable, thereby
  1409. * causing a little loss of bandwidth.
  1410. *
  1411. * The last important point is detecting whether bfqq does need this
  1412. * bandwidth recovery. In this respect, the next function deems the
  1413. * process associated with bfqq greedy, and thus allows it to recover
  1414. * the hole, if: 1) the process is waiting for the arrival of a new
  1415. * request (which implies that bfqq expired for one of the above two
  1416. * reasons), and 2) such a request has arrived soon. The first
  1417. * condition is controlled through the flag non_blocking_wait_rq,
  1418. * while the second through the flag arrived_in_time. If both
  1419. * conditions hold, then the function computes the budget in the
  1420. * above-described special way, and signals that the in-service queue
  1421. * should be expired. Timestamp back-shifting is done later in
  1422. * __bfq_activate_entity.
  1423. *
  1424. * 2. Reduce latency. Even if timestamps are not backshifted to let
  1425. * the process associated with bfqq recover a service hole, bfqq may
  1426. * however happen to have, after being (re)activated, a lower finish
  1427. * timestamp than the in-service queue. That is, the next budget of
  1428. * bfqq may have to be completed before the one of the in-service
  1429. * queue. If this is the case, then preempting the in-service queue
  1430. * allows this goal to be achieved, apart from the unpreemptible,
  1431. * outstanding requests mentioned above.
  1432. *
  1433. * Unfortunately, regardless of which of the above two goals one wants
  1434. * to achieve, service trees need first to be updated to know whether
  1435. * the in-service queue must be preempted. To have service trees
  1436. * correctly updated, the in-service queue must be expired and
  1437. * rescheduled, and bfqq must be scheduled too. This is one of the
  1438. * most costly operations (in future versions, the scheduling
  1439. * mechanism may be re-designed in such a way to make it possible to
  1440. * know whether preemption is needed without needing to update service
  1441. * trees). In addition, queue preemptions almost always cause random
  1442. * I/O, which may in turn cause loss of throughput. Finally, there may
  1443. * even be no in-service queue when the next function is invoked (so,
  1444. * no queue to compare timestamps with). Because of these facts, the
  1445. * next function adopts the following simple scheme to avoid costly
  1446. * operations, too frequent preemptions and too many dependencies on
  1447. * the state of the scheduler: it requests the expiration of the
  1448. * in-service queue (unconditionally) only for queues that need to
  1449. * recover a hole. Then it delegates to other parts of the code the
  1450. * responsibility of handling the above case 2.
  1451. */
  1452. static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
  1453. struct bfq_queue *bfqq,
  1454. bool arrived_in_time)
  1455. {
  1456. struct bfq_entity *entity = &bfqq->entity;
  1457. /*
  1458. * In the next compound condition, we check also whether there
  1459. * is some budget left, because otherwise there is no point in
  1460. * trying to go on serving bfqq with this same budget: bfqq
  1461. * would be expired immediately after being selected for
  1462. * service. This would only cause useless overhead.
  1463. */
  1464. if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
  1465. bfq_bfqq_budget_left(bfqq) > 0) {
  1466. /*
  1467. * We do not clear the flag non_blocking_wait_rq here, as
  1468. * the latter is used in bfq_activate_bfqq to signal
  1469. * that timestamps need to be back-shifted (and is
  1470. * cleared right after).
  1471. */
  1472. /*
  1473. * In next assignment we rely on that either
  1474. * entity->service or entity->budget are not updated
  1475. * on expiration if bfqq is empty (see
  1476. * __bfq_bfqq_recalc_budget). Thus both quantities
  1477. * remain unchanged after such an expiration, and the
  1478. * following statement therefore assigns to
  1479. * entity->budget the remaining budget on such an
  1480. * expiration.
  1481. */
  1482. entity->budget = min_t(unsigned long,
  1483. bfq_bfqq_budget_left(bfqq),
  1484. bfqq->max_budget);
  1485. /*
  1486. * At this point, we have used entity->service to get
  1487. * the budget left (needed for updating
  1488. * entity->budget). Thus we finally can, and have to,
  1489. * reset entity->service. The latter must be reset
  1490. * because bfqq would otherwise be charged again for
  1491. * the service it has received during its previous
  1492. * service slot(s).
  1493. */
  1494. entity->service = 0;
  1495. return true;
  1496. }
  1497. /*
  1498. * We can finally complete expiration, by setting service to 0.
  1499. */
  1500. entity->service = 0;
  1501. entity->budget = max_t(unsigned long, bfqq->max_budget,
  1502. bfq_serv_to_charge(bfqq->next_rq, bfqq));
  1503. bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
  1504. return false;
  1505. }
  1506. /*
  1507. * Return the farthest past time instant according to jiffies
  1508. * macros.
  1509. */
  1510. static unsigned long bfq_smallest_from_now(void)
  1511. {
  1512. return jiffies - MAX_JIFFY_OFFSET;
  1513. }
  1514. static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
  1515. struct bfq_queue *bfqq,
  1516. unsigned int old_wr_coeff,
  1517. bool wr_or_deserves_wr,
  1518. bool interactive,
  1519. bool in_burst,
  1520. bool soft_rt)
  1521. {
  1522. if (old_wr_coeff == 1 && wr_or_deserves_wr) {
  1523. /* start a weight-raising period */
  1524. if (interactive) {
  1525. bfqq->service_from_wr = 0;
  1526. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1527. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1528. } else {
  1529. /*
  1530. * No interactive weight raising in progress
  1531. * here: assign minus infinity to
  1532. * wr_start_at_switch_to_srt, to make sure
  1533. * that, at the end of the soft-real-time
  1534. * weight raising periods that is starting
  1535. * now, no interactive weight-raising period
  1536. * may be wrongly considered as still in
  1537. * progress (and thus actually started by
  1538. * mistake).
  1539. */
  1540. bfqq->wr_start_at_switch_to_srt =
  1541. bfq_smallest_from_now();
  1542. bfqq->wr_coeff = bfqd->bfq_wr_coeff *
  1543. BFQ_SOFTRT_WEIGHT_FACTOR;
  1544. bfqq->wr_cur_max_time =
  1545. bfqd->bfq_wr_rt_max_time;
  1546. }
  1547. /*
  1548. * If needed, further reduce budget to make sure it is
  1549. * close to bfqq's backlog, so as to reduce the
  1550. * scheduling-error component due to a too large
  1551. * budget. Do not care about throughput consequences,
  1552. * but only about latency. Finally, do not assign a
  1553. * too small budget either, to avoid increasing
  1554. * latency by causing too frequent expirations.
  1555. */
  1556. bfqq->entity.budget = min_t(unsigned long,
  1557. bfqq->entity.budget,
  1558. 2 * bfq_min_budget(bfqd));
  1559. } else if (old_wr_coeff > 1) {
  1560. if (interactive) { /* update wr coeff and duration */
  1561. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1562. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1563. } else if (in_burst)
  1564. bfqq->wr_coeff = 1;
  1565. else if (soft_rt) {
  1566. /*
  1567. * The application is now or still meeting the
  1568. * requirements for being deemed soft rt. We
  1569. * can then correctly and safely (re)charge
  1570. * the weight-raising duration for the
  1571. * application with the weight-raising
  1572. * duration for soft rt applications.
  1573. *
  1574. * In particular, doing this recharge now, i.e.,
  1575. * before the weight-raising period for the
  1576. * application finishes, reduces the probability
  1577. * of the following negative scenario:
  1578. * 1) the weight of a soft rt application is
  1579. * raised at startup (as for any newly
  1580. * created application),
  1581. * 2) since the application is not interactive,
  1582. * at a certain time weight-raising is
  1583. * stopped for the application,
  1584. * 3) at that time the application happens to
  1585. * still have pending requests, and hence
  1586. * is destined to not have a chance to be
  1587. * deemed soft rt before these requests are
  1588. * completed (see the comments to the
  1589. * function bfq_bfqq_softrt_next_start()
  1590. * for details on soft rt detection),
  1591. * 4) these pending requests experience a high
  1592. * latency because the application is not
  1593. * weight-raised while they are pending.
  1594. */
  1595. if (bfqq->wr_cur_max_time !=
  1596. bfqd->bfq_wr_rt_max_time) {
  1597. bfqq->wr_start_at_switch_to_srt =
  1598. bfqq->last_wr_start_finish;
  1599. bfqq->wr_cur_max_time =
  1600. bfqd->bfq_wr_rt_max_time;
  1601. bfqq->wr_coeff = bfqd->bfq_wr_coeff *
  1602. BFQ_SOFTRT_WEIGHT_FACTOR;
  1603. }
  1604. bfqq->last_wr_start_finish = jiffies;
  1605. }
  1606. }
  1607. }
  1608. static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
  1609. struct bfq_queue *bfqq)
  1610. {
  1611. return bfqq->dispatched == 0 &&
  1612. time_is_before_jiffies(
  1613. bfqq->budget_timeout +
  1614. bfqd->bfq_wr_min_idle_time);
  1615. }
  1616. /*
  1617. * Return true if bfqq is in a higher priority class, or has a higher
  1618. * weight than the in-service queue.
  1619. */
  1620. static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
  1621. struct bfq_queue *in_serv_bfqq)
  1622. {
  1623. int bfqq_weight, in_serv_weight;
  1624. if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class)
  1625. return true;
  1626. if (in_serv_bfqq->entity.parent == bfqq->entity.parent) {
  1627. bfqq_weight = bfqq->entity.weight;
  1628. in_serv_weight = in_serv_bfqq->entity.weight;
  1629. } else {
  1630. if (bfqq->entity.parent)
  1631. bfqq_weight = bfqq->entity.parent->weight;
  1632. else
  1633. bfqq_weight = bfqq->entity.weight;
  1634. if (in_serv_bfqq->entity.parent)
  1635. in_serv_weight = in_serv_bfqq->entity.parent->weight;
  1636. else
  1637. in_serv_weight = in_serv_bfqq->entity.weight;
  1638. }
  1639. return bfqq_weight > in_serv_weight;
  1640. }
  1641. /*
  1642. * Get the index of the actuator that will serve bio.
  1643. */
  1644. static unsigned int bfq_actuator_index(struct bfq_data *bfqd, struct bio *bio)
  1645. {
  1646. unsigned int i;
  1647. sector_t end;
  1648. /* no search needed if one or zero ranges present */
  1649. if (bfqd->num_actuators == 1)
  1650. return 0;
  1651. /* bio_end_sector(bio) gives the sector after the last one */
  1652. end = bio_end_sector(bio) - 1;
  1653. for (i = 0; i < bfqd->num_actuators; i++) {
  1654. if (end >= bfqd->sector[i] &&
  1655. end < bfqd->sector[i] + bfqd->nr_sectors[i])
  1656. return i;
  1657. }
  1658. WARN_ONCE(true,
  1659. "bfq_actuator_index: bio sector out of ranges: end=%llu\n",
  1660. end);
  1661. return 0;
  1662. }
  1663. static bool bfq_better_to_idle(struct bfq_queue *bfqq);
  1664. static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
  1665. struct bfq_queue *bfqq,
  1666. int old_wr_coeff,
  1667. struct request *rq,
  1668. bool *interactive)
  1669. {
  1670. bool soft_rt, in_burst, wr_or_deserves_wr,
  1671. bfqq_wants_to_preempt,
  1672. idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
  1673. /*
  1674. * See the comments on
  1675. * bfq_bfqq_update_budg_for_activation for
  1676. * details on the usage of the next variable.
  1677. */
  1678. arrived_in_time = blk_time_get_ns() <=
  1679. bfqq->ttime.last_end_request +
  1680. bfqd->bfq_slice_idle * 3;
  1681. unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
  1682. bool bfqq_non_merged_or_stably_merged =
  1683. bfqq->bic || RQ_BIC(rq)->bfqq_data[act_idx].stably_merged;
  1684. /*
  1685. * bfqq deserves to be weight-raised if:
  1686. * - it is sync,
  1687. * - it does not belong to a large burst,
  1688. * - it has been idle for enough time or is soft real-time,
  1689. * - is linked to a bfq_io_cq (it is not shared in any sense),
  1690. * - has a default weight (otherwise we assume the user wanted
  1691. * to control its weight explicitly)
  1692. */
  1693. in_burst = bfq_bfqq_in_large_burst(bfqq);
  1694. soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
  1695. !BFQQ_TOTALLY_SEEKY(bfqq) &&
  1696. !in_burst &&
  1697. time_is_before_jiffies(bfqq->soft_rt_next_start) &&
  1698. bfqq->dispatched == 0 &&
  1699. bfqq->entity.new_weight == 40;
  1700. *interactive = !in_burst && idle_for_long_time &&
  1701. bfqq->entity.new_weight == 40;
  1702. /*
  1703. * Merged bfq_queues are kept out of weight-raising
  1704. * (low-latency) mechanisms. The reason is that these queues
  1705. * are usually created for non-interactive and
  1706. * non-soft-real-time tasks. Yet this is not the case for
  1707. * stably-merged queues. These queues are merged just because
  1708. * they are created shortly after each other. So they may
  1709. * easily serve the I/O of an interactive or soft-real time
  1710. * application, if the application happens to spawn multiple
  1711. * processes. So let also stably-merged queued enjoy weight
  1712. * raising.
  1713. */
  1714. wr_or_deserves_wr = bfqd->low_latency &&
  1715. (bfqq->wr_coeff > 1 ||
  1716. (bfq_bfqq_sync(bfqq) && bfqq_non_merged_or_stably_merged &&
  1717. (*interactive || soft_rt)));
  1718. /*
  1719. * Using the last flag, update budget and check whether bfqq
  1720. * may want to preempt the in-service queue.
  1721. */
  1722. bfqq_wants_to_preempt =
  1723. bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
  1724. arrived_in_time);
  1725. /*
  1726. * If bfqq happened to be activated in a burst, but has been
  1727. * idle for much more than an interactive queue, then we
  1728. * assume that, in the overall I/O initiated in the burst, the
  1729. * I/O associated with bfqq is finished. So bfqq does not need
  1730. * to be treated as a queue belonging to a burst
  1731. * anymore. Accordingly, we reset bfqq's in_large_burst flag
  1732. * if set, and remove bfqq from the burst list if it's
  1733. * there. We do not decrement burst_size, because the fact
  1734. * that bfqq does not need to belong to the burst list any
  1735. * more does not invalidate the fact that bfqq was created in
  1736. * a burst.
  1737. */
  1738. if (likely(!bfq_bfqq_just_created(bfqq)) &&
  1739. idle_for_long_time &&
  1740. time_is_before_jiffies(
  1741. bfqq->budget_timeout +
  1742. msecs_to_jiffies(10000))) {
  1743. hlist_del_init(&bfqq->burst_list_node);
  1744. bfq_clear_bfqq_in_large_burst(bfqq);
  1745. }
  1746. bfq_clear_bfqq_just_created(bfqq);
  1747. if (bfqd->low_latency) {
  1748. if (unlikely(time_is_after_jiffies(bfqq->split_time)))
  1749. /* wraparound */
  1750. bfqq->split_time =
  1751. jiffies - bfqd->bfq_wr_min_idle_time - 1;
  1752. if (time_is_before_jiffies(bfqq->split_time +
  1753. bfqd->bfq_wr_min_idle_time)) {
  1754. bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
  1755. old_wr_coeff,
  1756. wr_or_deserves_wr,
  1757. *interactive,
  1758. in_burst,
  1759. soft_rt);
  1760. if (old_wr_coeff != bfqq->wr_coeff)
  1761. bfqq->entity.prio_changed = 1;
  1762. }
  1763. }
  1764. bfqq->last_idle_bklogged = jiffies;
  1765. bfqq->service_from_backlogged = 0;
  1766. bfq_clear_bfqq_softrt_update(bfqq);
  1767. bfq_add_bfqq_busy(bfqq);
  1768. /*
  1769. * Expire in-service queue if preemption may be needed for
  1770. * guarantees or throughput. As for guarantees, we care
  1771. * explicitly about two cases. The first is that bfqq has to
  1772. * recover a service hole, as explained in the comments on
  1773. * bfq_bfqq_update_budg_for_activation(), i.e., that
  1774. * bfqq_wants_to_preempt is true. However, if bfqq does not
  1775. * carry time-critical I/O, then bfqq's bandwidth is less
  1776. * important than that of queues that carry time-critical I/O.
  1777. * So, as a further constraint, we consider this case only if
  1778. * bfqq is at least as weight-raised, i.e., at least as time
  1779. * critical, as the in-service queue.
  1780. *
  1781. * The second case is that bfqq is in a higher priority class,
  1782. * or has a higher weight than the in-service queue. If this
  1783. * condition does not hold, we don't care because, even if
  1784. * bfqq does not start to be served immediately, the resulting
  1785. * delay for bfqq's I/O is however lower or much lower than
  1786. * the ideal completion time to be guaranteed to bfqq's I/O.
  1787. *
  1788. * In both cases, preemption is needed only if, according to
  1789. * the timestamps of both bfqq and of the in-service queue,
  1790. * bfqq actually is the next queue to serve. So, to reduce
  1791. * useless preemptions, the return value of
  1792. * next_queue_may_preempt() is considered in the next compound
  1793. * condition too. Yet next_queue_may_preempt() just checks a
  1794. * simple, necessary condition for bfqq to be the next queue
  1795. * to serve. In fact, to evaluate a sufficient condition, the
  1796. * timestamps of the in-service queue would need to be
  1797. * updated, and this operation is quite costly (see the
  1798. * comments on bfq_bfqq_update_budg_for_activation()).
  1799. *
  1800. * As for throughput, we ask bfq_better_to_idle() whether we
  1801. * still need to plug I/O dispatching. If bfq_better_to_idle()
  1802. * says no, then plugging is not needed any longer, either to
  1803. * boost throughput or to perserve service guarantees. Then
  1804. * the best option is to stop plugging I/O, as not doing so
  1805. * would certainly lower throughput. We may end up in this
  1806. * case if: (1) upon a dispatch attempt, we detected that it
  1807. * was better to plug I/O dispatch, and to wait for a new
  1808. * request to arrive for the currently in-service queue, but
  1809. * (2) this switch of bfqq to busy changes the scenario.
  1810. */
  1811. if (bfqd->in_service_queue &&
  1812. ((bfqq_wants_to_preempt &&
  1813. bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
  1814. bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
  1815. !bfq_better_to_idle(bfqd->in_service_queue)) &&
  1816. next_queue_may_preempt(bfqd))
  1817. bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
  1818. false, BFQQE_PREEMPTED);
  1819. }
  1820. static void bfq_reset_inject_limit(struct bfq_data *bfqd,
  1821. struct bfq_queue *bfqq)
  1822. {
  1823. /* invalidate baseline total service time */
  1824. bfqq->last_serv_time_ns = 0;
  1825. /*
  1826. * Reset pointer in case we are waiting for
  1827. * some request completion.
  1828. */
  1829. bfqd->waited_rq = NULL;
  1830. /*
  1831. * If bfqq has a short think time, then start by setting the
  1832. * inject limit to 0 prudentially, because the service time of
  1833. * an injected I/O request may be higher than the think time
  1834. * of bfqq, and therefore, if one request was injected when
  1835. * bfqq remains empty, this injected request might delay the
  1836. * service of the next I/O request for bfqq significantly. In
  1837. * case bfqq can actually tolerate some injection, then the
  1838. * adaptive update will however raise the limit soon. This
  1839. * lucky circumstance holds exactly because bfqq has a short
  1840. * think time, and thus, after remaining empty, is likely to
  1841. * get new I/O enqueued---and then completed---before being
  1842. * expired. This is the very pattern that gives the
  1843. * limit-update algorithm the chance to measure the effect of
  1844. * injection on request service times, and then to update the
  1845. * limit accordingly.
  1846. *
  1847. * However, in the following special case, the inject limit is
  1848. * left to 1 even if the think time is short: bfqq's I/O is
  1849. * synchronized with that of some other queue, i.e., bfqq may
  1850. * receive new I/O only after the I/O of the other queue is
  1851. * completed. Keeping the inject limit to 1 allows the
  1852. * blocking I/O to be served while bfqq is in service. And
  1853. * this is very convenient both for bfqq and for overall
  1854. * throughput, as explained in detail in the comments in
  1855. * bfq_update_has_short_ttime().
  1856. *
  1857. * On the opposite end, if bfqq has a long think time, then
  1858. * start directly by 1, because:
  1859. * a) on the bright side, keeping at most one request in
  1860. * service in the drive is unlikely to cause any harm to the
  1861. * latency of bfqq's requests, as the service time of a single
  1862. * request is likely to be lower than the think time of bfqq;
  1863. * b) on the downside, after becoming empty, bfqq is likely to
  1864. * expire before getting its next request. With this request
  1865. * arrival pattern, it is very hard to sample total service
  1866. * times and update the inject limit accordingly (see comments
  1867. * on bfq_update_inject_limit()). So the limit is likely to be
  1868. * never, or at least seldom, updated. As a consequence, by
  1869. * setting the limit to 1, we avoid that no injection ever
  1870. * occurs with bfqq. On the downside, this proactive step
  1871. * further reduces chances to actually compute the baseline
  1872. * total service time. Thus it reduces chances to execute the
  1873. * limit-update algorithm and possibly raise the limit to more
  1874. * than 1.
  1875. */
  1876. if (bfq_bfqq_has_short_ttime(bfqq))
  1877. bfqq->inject_limit = 0;
  1878. else
  1879. bfqq->inject_limit = 1;
  1880. bfqq->decrease_time_jif = jiffies;
  1881. }
  1882. static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
  1883. {
  1884. u64 tot_io_time = now_ns - bfqq->io_start_time;
  1885. if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0)
  1886. bfqq->tot_idle_time +=
  1887. now_ns - bfqq->ttime.last_end_request;
  1888. if (unlikely(bfq_bfqq_just_created(bfqq)))
  1889. return;
  1890. /*
  1891. * Must be busy for at least about 80% of the time to be
  1892. * considered I/O bound.
  1893. */
  1894. if (bfqq->tot_idle_time * 5 > tot_io_time)
  1895. bfq_clear_bfqq_IO_bound(bfqq);
  1896. else
  1897. bfq_mark_bfqq_IO_bound(bfqq);
  1898. /*
  1899. * Keep an observation window of at most 200 ms in the past
  1900. * from now.
  1901. */
  1902. if (tot_io_time > 200 * NSEC_PER_MSEC) {
  1903. bfqq->io_start_time = now_ns - (tot_io_time>>1);
  1904. bfqq->tot_idle_time >>= 1;
  1905. }
  1906. }
  1907. /*
  1908. * Detect whether bfqq's I/O seems synchronized with that of some
  1909. * other queue, i.e., whether bfqq, after remaining empty, happens to
  1910. * receive new I/O only right after some I/O request of the other
  1911. * queue has been completed. We call waker queue the other queue, and
  1912. * we assume, for simplicity, that bfqq may have at most one waker
  1913. * queue.
  1914. *
  1915. * A remarkable throughput boost can be reached by unconditionally
  1916. * injecting the I/O of the waker queue, every time a new
  1917. * bfq_dispatch_request happens to be invoked while I/O is being
  1918. * plugged for bfqq. In addition to boosting throughput, this
  1919. * unblocks bfqq's I/O, thereby improving bandwidth and latency for
  1920. * bfqq. Note that these same results may be achieved with the general
  1921. * injection mechanism, but less effectively. For details on this
  1922. * aspect, see the comments on the choice of the queue for injection
  1923. * in bfq_select_queue().
  1924. *
  1925. * Turning back to the detection of a waker queue, a queue Q is deemed as a
  1926. * waker queue for bfqq if, for three consecutive times, bfqq happens to become
  1927. * non empty right after a request of Q has been completed within given
  1928. * timeout. In this respect, even if bfqq is empty, we do not check for a waker
  1929. * if it still has some in-flight I/O. In fact, in this case bfqq is actually
  1930. * still being served by the drive, and may receive new I/O on the completion
  1931. * of some of the in-flight requests. In particular, on the first time, Q is
  1932. * tentatively set as a candidate waker queue, while on the third consecutive
  1933. * time that Q is detected, the field waker_bfqq is set to Q, to confirm that Q
  1934. * is a waker queue for bfqq. These detection steps are performed only if bfqq
  1935. * has a long think time, so as to make it more likely that bfqq's I/O is
  1936. * actually being blocked by a synchronization. This last filter, plus the
  1937. * above three-times requirement and time limit for detection, make false
  1938. * positives less likely.
  1939. *
  1940. * NOTE
  1941. *
  1942. * The sooner a waker queue is detected, the sooner throughput can be
  1943. * boosted by injecting I/O from the waker queue. Fortunately,
  1944. * detection is likely to be actually fast, for the following
  1945. * reasons. While blocked by synchronization, bfqq has a long think
  1946. * time. This implies that bfqq's inject limit is at least equal to 1
  1947. * (see the comments in bfq_update_inject_limit()). So, thanks to
  1948. * injection, the waker queue is likely to be served during the very
  1949. * first I/O-plugging time interval for bfqq. This triggers the first
  1950. * step of the detection mechanism. Thanks again to injection, the
  1951. * candidate waker queue is then likely to be confirmed no later than
  1952. * during the next I/O-plugging interval for bfqq.
  1953. *
  1954. * ISSUE
  1955. *
  1956. * On queue merging all waker information is lost.
  1957. */
  1958. static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  1959. u64 now_ns)
  1960. {
  1961. char waker_name[MAX_BFQQ_NAME_LENGTH];
  1962. if (!bfqd->last_completed_rq_bfqq ||
  1963. bfqd->last_completed_rq_bfqq == bfqq ||
  1964. bfq_bfqq_has_short_ttime(bfqq) ||
  1965. now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC ||
  1966. bfqd->last_completed_rq_bfqq == &bfqd->oom_bfqq ||
  1967. bfqq == &bfqd->oom_bfqq)
  1968. return;
  1969. /*
  1970. * We reset waker detection logic also if too much time has passed
  1971. * since the first detection. If wakeups are rare, pointless idling
  1972. * doesn't hurt throughput that much. The condition below makes sure
  1973. * we do not uselessly idle blocking waker in more than 1/64 cases.
  1974. */
  1975. if (bfqd->last_completed_rq_bfqq !=
  1976. bfqq->tentative_waker_bfqq ||
  1977. now_ns > bfqq->waker_detection_started +
  1978. 128 * (u64)bfqd->bfq_slice_idle) {
  1979. /*
  1980. * First synchronization detected with a
  1981. * candidate waker queue, or with a different
  1982. * candidate waker queue from the current one.
  1983. */
  1984. bfqq->tentative_waker_bfqq =
  1985. bfqd->last_completed_rq_bfqq;
  1986. bfqq->num_waker_detections = 1;
  1987. bfqq->waker_detection_started = now_ns;
  1988. bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name,
  1989. MAX_BFQQ_NAME_LENGTH);
  1990. bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name);
  1991. } else /* Same tentative waker queue detected again */
  1992. bfqq->num_waker_detections++;
  1993. if (bfqq->num_waker_detections == 3) {
  1994. bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
  1995. bfqq->tentative_waker_bfqq = NULL;
  1996. bfq_bfqq_name(bfqq->waker_bfqq, waker_name,
  1997. MAX_BFQQ_NAME_LENGTH);
  1998. bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name);
  1999. /*
  2000. * If the waker queue disappears, then
  2001. * bfqq->waker_bfqq must be reset. To
  2002. * this goal, we maintain in each
  2003. * waker queue a list, woken_list, of
  2004. * all the queues that reference the
  2005. * waker queue through their
  2006. * waker_bfqq pointer. When the waker
  2007. * queue exits, the waker_bfqq pointer
  2008. * of all the queues in the woken_list
  2009. * is reset.
  2010. *
  2011. * In addition, if bfqq is already in
  2012. * the woken_list of a waker queue,
  2013. * then, before being inserted into
  2014. * the woken_list of a new waker
  2015. * queue, bfqq must be removed from
  2016. * the woken_list of the old waker
  2017. * queue.
  2018. */
  2019. if (!hlist_unhashed(&bfqq->woken_list_node))
  2020. hlist_del_init(&bfqq->woken_list_node);
  2021. hlist_add_head(&bfqq->woken_list_node,
  2022. &bfqd->last_completed_rq_bfqq->woken_list);
  2023. }
  2024. }
  2025. static void bfq_add_request(struct request *rq)
  2026. {
  2027. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  2028. struct bfq_data *bfqd = bfqq->bfqd;
  2029. struct request *next_rq, *prev;
  2030. unsigned int old_wr_coeff = bfqq->wr_coeff;
  2031. bool interactive = false;
  2032. u64 now_ns = blk_time_get_ns();
  2033. bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
  2034. bfqq->queued[rq_is_sync(rq)]++;
  2035. /*
  2036. * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
  2037. * may be read without holding the lock in bfq_has_work().
  2038. */
  2039. WRITE_ONCE(bfqd->queued, bfqd->queued + 1);
  2040. if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) {
  2041. bfq_check_waker(bfqd, bfqq, now_ns);
  2042. /*
  2043. * Periodically reset inject limit, to make sure that
  2044. * the latter eventually drops in case workload
  2045. * changes, see step (3) in the comments on
  2046. * bfq_update_inject_limit().
  2047. */
  2048. if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  2049. msecs_to_jiffies(1000)))
  2050. bfq_reset_inject_limit(bfqd, bfqq);
  2051. /*
  2052. * The following conditions must hold to setup a new
  2053. * sampling of total service time, and then a new
  2054. * update of the inject limit:
  2055. * - bfqq is in service, because the total service
  2056. * time is evaluated only for the I/O requests of
  2057. * the queues in service;
  2058. * - this is the right occasion to compute or to
  2059. * lower the baseline total service time, because
  2060. * there are actually no requests in the drive,
  2061. * or
  2062. * the baseline total service time is available, and
  2063. * this is the right occasion to compute the other
  2064. * quantity needed to update the inject limit, i.e.,
  2065. * the total service time caused by the amount of
  2066. * injection allowed by the current value of the
  2067. * limit. It is the right occasion because injection
  2068. * has actually been performed during the service
  2069. * hole, and there are still in-flight requests,
  2070. * which are very likely to be exactly the injected
  2071. * requests, or part of them;
  2072. * - the minimum interval for sampling the total
  2073. * service time and updating the inject limit has
  2074. * elapsed.
  2075. */
  2076. if (bfqq == bfqd->in_service_queue &&
  2077. (bfqd->tot_rq_in_driver == 0 ||
  2078. (bfqq->last_serv_time_ns > 0 &&
  2079. bfqd->rqs_injected && bfqd->tot_rq_in_driver > 0)) &&
  2080. time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  2081. msecs_to_jiffies(10))) {
  2082. bfqd->last_empty_occupied_ns = blk_time_get_ns();
  2083. /*
  2084. * Start the state machine for measuring the
  2085. * total service time of rq: setting
  2086. * wait_dispatch will cause bfqd->waited_rq to
  2087. * be set when rq will be dispatched.
  2088. */
  2089. bfqd->wait_dispatch = true;
  2090. /*
  2091. * If there is no I/O in service in the drive,
  2092. * then possible injection occurred before the
  2093. * arrival of rq will not affect the total
  2094. * service time of rq. So the injection limit
  2095. * must not be updated as a function of such
  2096. * total service time, unless new injection
  2097. * occurs before rq is completed. To have the
  2098. * injection limit updated only in the latter
  2099. * case, reset rqs_injected here (rqs_injected
  2100. * will be set in case injection is performed
  2101. * on bfqq before rq is completed).
  2102. */
  2103. if (bfqd->tot_rq_in_driver == 0)
  2104. bfqd->rqs_injected = false;
  2105. }
  2106. }
  2107. if (bfq_bfqq_sync(bfqq))
  2108. bfq_update_io_intensity(bfqq, now_ns);
  2109. elv_rb_add(&bfqq->sort_list, rq);
  2110. /*
  2111. * Check if this request is a better next-serve candidate.
  2112. */
  2113. prev = bfqq->next_rq;
  2114. next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
  2115. bfqq->next_rq = next_rq;
  2116. /*
  2117. * Adjust priority tree position, if next_rq changes.
  2118. * See comments on bfq_pos_tree_add_move() for the unlikely().
  2119. */
  2120. if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
  2121. bfq_pos_tree_add_move(bfqd, bfqq);
  2122. if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
  2123. bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
  2124. rq, &interactive);
  2125. else {
  2126. if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
  2127. time_is_before_jiffies(
  2128. bfqq->last_wr_start_finish +
  2129. bfqd->bfq_wr_min_inter_arr_async)) {
  2130. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  2131. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  2132. bfqd->wr_busy_queues++;
  2133. bfqq->entity.prio_changed = 1;
  2134. }
  2135. if (prev != bfqq->next_rq)
  2136. bfq_updated_next_req(bfqd, bfqq);
  2137. }
  2138. /*
  2139. * Assign jiffies to last_wr_start_finish in the following
  2140. * cases:
  2141. *
  2142. * . if bfqq is not going to be weight-raised, because, for
  2143. * non weight-raised queues, last_wr_start_finish stores the
  2144. * arrival time of the last request; as of now, this piece
  2145. * of information is used only for deciding whether to
  2146. * weight-raise async queues
  2147. *
  2148. * . if bfqq is not weight-raised, because, if bfqq is now
  2149. * switching to weight-raised, then last_wr_start_finish
  2150. * stores the time when weight-raising starts
  2151. *
  2152. * . if bfqq is interactive, because, regardless of whether
  2153. * bfqq is currently weight-raised, the weight-raising
  2154. * period must start or restart (this case is considered
  2155. * separately because it is not detected by the above
  2156. * conditions, if bfqq is already weight-raised)
  2157. *
  2158. * last_wr_start_finish has to be updated also if bfqq is soft
  2159. * real-time, because the weight-raising period is constantly
  2160. * restarted on idle-to-busy transitions for these queues, but
  2161. * this is already done in bfq_bfqq_handle_idle_busy_switch if
  2162. * needed.
  2163. */
  2164. if (bfqd->low_latency &&
  2165. (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
  2166. bfqq->last_wr_start_finish = jiffies;
  2167. }
  2168. static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
  2169. struct bio *bio,
  2170. struct request_queue *q)
  2171. {
  2172. struct bfq_queue *bfqq = bfqd->bio_bfqq;
  2173. if (bfqq)
  2174. return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
  2175. return NULL;
  2176. }
  2177. static sector_t get_sdist(sector_t last_pos, struct request *rq)
  2178. {
  2179. if (last_pos)
  2180. return abs(blk_rq_pos(rq) - last_pos);
  2181. return 0;
  2182. }
  2183. static void bfq_remove_request(struct request_queue *q,
  2184. struct request *rq)
  2185. {
  2186. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  2187. struct bfq_data *bfqd = bfqq->bfqd;
  2188. const int sync = rq_is_sync(rq);
  2189. if (bfqq->next_rq == rq) {
  2190. bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
  2191. bfq_updated_next_req(bfqd, bfqq);
  2192. }
  2193. if (rq->queuelist.prev != &rq->queuelist)
  2194. list_del_init(&rq->queuelist);
  2195. bfqq->queued[sync]--;
  2196. /*
  2197. * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
  2198. * may be read without holding the lock in bfq_has_work().
  2199. */
  2200. WRITE_ONCE(bfqd->queued, bfqd->queued - 1);
  2201. elv_rb_del(&bfqq->sort_list, rq);
  2202. elv_rqhash_del(q, rq);
  2203. if (q->last_merge == rq)
  2204. q->last_merge = NULL;
  2205. if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
  2206. bfqq->next_rq = NULL;
  2207. if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
  2208. bfq_del_bfqq_busy(bfqq, false);
  2209. /*
  2210. * bfqq emptied. In normal operation, when
  2211. * bfqq is empty, bfqq->entity.service and
  2212. * bfqq->entity.budget must contain,
  2213. * respectively, the service received and the
  2214. * budget used last time bfqq emptied. These
  2215. * facts do not hold in this case, as at least
  2216. * this last removal occurred while bfqq is
  2217. * not in service. To avoid inconsistencies,
  2218. * reset both bfqq->entity.service and
  2219. * bfqq->entity.budget, if bfqq has still a
  2220. * process that may issue I/O requests to it.
  2221. */
  2222. bfqq->entity.budget = bfqq->entity.service = 0;
  2223. }
  2224. /*
  2225. * Remove queue from request-position tree as it is empty.
  2226. */
  2227. if (bfqq->pos_root) {
  2228. rb_erase(&bfqq->pos_node, bfqq->pos_root);
  2229. bfqq->pos_root = NULL;
  2230. }
  2231. } else {
  2232. /* see comments on bfq_pos_tree_add_move() for the unlikely() */
  2233. if (unlikely(!bfqd->nonrot_with_queueing))
  2234. bfq_pos_tree_add_move(bfqd, bfqq);
  2235. }
  2236. if (rq->cmd_flags & REQ_META)
  2237. bfqq->meta_pending--;
  2238. }
  2239. static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
  2240. unsigned int nr_segs)
  2241. {
  2242. struct bfq_data *bfqd = q->elevator->elevator_data;
  2243. struct request *free = NULL;
  2244. /*
  2245. * bfq_bic_lookup grabs the queue_lock: invoke it now and
  2246. * store its return value for later use, to avoid nesting
  2247. * queue_lock inside the bfqd->lock. We assume that the bic
  2248. * returned by bfq_bic_lookup does not go away before
  2249. * bfqd->lock is taken.
  2250. */
  2251. struct bfq_io_cq *bic = bfq_bic_lookup(q);
  2252. bool ret;
  2253. spin_lock_irq(&bfqd->lock);
  2254. if (bic) {
  2255. /*
  2256. * Make sure cgroup info is uptodate for current process before
  2257. * considering the merge.
  2258. */
  2259. bfq_bic_update_cgroup(bic, bio);
  2260. bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf),
  2261. bfq_actuator_index(bfqd, bio));
  2262. } else {
  2263. bfqd->bio_bfqq = NULL;
  2264. }
  2265. bfqd->bio_bic = bic;
  2266. ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
  2267. spin_unlock_irq(&bfqd->lock);
  2268. if (free)
  2269. blk_mq_free_request(free);
  2270. return ret;
  2271. }
  2272. static int bfq_request_merge(struct request_queue *q, struct request **req,
  2273. struct bio *bio)
  2274. {
  2275. struct bfq_data *bfqd = q->elevator->elevator_data;
  2276. struct request *__rq;
  2277. __rq = bfq_find_rq_fmerge(bfqd, bio, q);
  2278. if (__rq && elv_bio_merge_ok(__rq, bio)) {
  2279. *req = __rq;
  2280. if (blk_discard_mergable(__rq))
  2281. return ELEVATOR_DISCARD_MERGE;
  2282. return ELEVATOR_FRONT_MERGE;
  2283. }
  2284. return ELEVATOR_NO_MERGE;
  2285. }
  2286. static void bfq_request_merged(struct request_queue *q, struct request *req,
  2287. enum elv_merge type)
  2288. {
  2289. if (type == ELEVATOR_FRONT_MERGE &&
  2290. rb_prev(&req->rb_node) &&
  2291. blk_rq_pos(req) <
  2292. blk_rq_pos(container_of(rb_prev(&req->rb_node),
  2293. struct request, rb_node))) {
  2294. struct bfq_queue *bfqq = RQ_BFQQ(req);
  2295. struct bfq_data *bfqd;
  2296. struct request *prev, *next_rq;
  2297. if (!bfqq)
  2298. return;
  2299. bfqd = bfqq->bfqd;
  2300. /* Reposition request in its sort_list */
  2301. elv_rb_del(&bfqq->sort_list, req);
  2302. elv_rb_add(&bfqq->sort_list, req);
  2303. /* Choose next request to be served for bfqq */
  2304. prev = bfqq->next_rq;
  2305. next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
  2306. bfqd->last_position);
  2307. bfqq->next_rq = next_rq;
  2308. /*
  2309. * If next_rq changes, update both the queue's budget to
  2310. * fit the new request and the queue's position in its
  2311. * rq_pos_tree.
  2312. */
  2313. if (prev != bfqq->next_rq) {
  2314. bfq_updated_next_req(bfqd, bfqq);
  2315. /*
  2316. * See comments on bfq_pos_tree_add_move() for
  2317. * the unlikely().
  2318. */
  2319. if (unlikely(!bfqd->nonrot_with_queueing))
  2320. bfq_pos_tree_add_move(bfqd, bfqq);
  2321. }
  2322. }
  2323. }
  2324. /*
  2325. * This function is called to notify the scheduler that the requests
  2326. * rq and 'next' have been merged, with 'next' going away. BFQ
  2327. * exploits this hook to address the following issue: if 'next' has a
  2328. * fifo_time lower that rq, then the fifo_time of rq must be set to
  2329. * the value of 'next', to not forget the greater age of 'next'.
  2330. *
  2331. * NOTE: in this function we assume that rq is in a bfq_queue, basing
  2332. * on that rq is picked from the hash table q->elevator->hash, which,
  2333. * in its turn, is filled only with I/O requests present in
  2334. * bfq_queues, while BFQ is in use for the request queue q. In fact,
  2335. * the function that fills this hash table (elv_rqhash_add) is called
  2336. * only by bfq_insert_request.
  2337. */
  2338. static void bfq_requests_merged(struct request_queue *q, struct request *rq,
  2339. struct request *next)
  2340. {
  2341. struct bfq_queue *bfqq = RQ_BFQQ(rq),
  2342. *next_bfqq = RQ_BFQQ(next);
  2343. if (!bfqq)
  2344. goto remove;
  2345. /*
  2346. * If next and rq belong to the same bfq_queue and next is older
  2347. * than rq, then reposition rq in the fifo (by substituting next
  2348. * with rq). Otherwise, if next and rq belong to different
  2349. * bfq_queues, never reposition rq: in fact, we would have to
  2350. * reposition it with respect to next's position in its own fifo,
  2351. * which would most certainly be too expensive with respect to
  2352. * the benefits.
  2353. */
  2354. if (bfqq == next_bfqq &&
  2355. !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
  2356. next->fifo_time < rq->fifo_time) {
  2357. list_del_init(&rq->queuelist);
  2358. list_replace_init(&next->queuelist, &rq->queuelist);
  2359. rq->fifo_time = next->fifo_time;
  2360. }
  2361. if (bfqq->next_rq == next)
  2362. bfqq->next_rq = rq;
  2363. bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
  2364. remove:
  2365. /* Merged request may be in the IO scheduler. Remove it. */
  2366. if (!RB_EMPTY_NODE(&next->rb_node)) {
  2367. bfq_remove_request(next->q, next);
  2368. if (next_bfqq)
  2369. bfqg_stats_update_io_remove(bfqq_group(next_bfqq),
  2370. next->cmd_flags);
  2371. }
  2372. }
  2373. /* Must be called with bfqq != NULL */
  2374. static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
  2375. {
  2376. /*
  2377. * If bfqq has been enjoying interactive weight-raising, then
  2378. * reset soft_rt_next_start. We do it for the following
  2379. * reason. bfqq may have been conveying the I/O needed to load
  2380. * a soft real-time application. Such an application actually
  2381. * exhibits a soft real-time I/O pattern after it finishes
  2382. * loading, and finally starts doing its job. But, if bfqq has
  2383. * been receiving a lot of bandwidth so far (likely to happen
  2384. * on a fast device), then soft_rt_next_start now contains a
  2385. * high value that. So, without this reset, bfqq would be
  2386. * prevented from being possibly considered as soft_rt for a
  2387. * very long time.
  2388. */
  2389. if (bfqq->wr_cur_max_time !=
  2390. bfqq->bfqd->bfq_wr_rt_max_time)
  2391. bfqq->soft_rt_next_start = jiffies;
  2392. if (bfq_bfqq_busy(bfqq))
  2393. bfqq->bfqd->wr_busy_queues--;
  2394. bfqq->wr_coeff = 1;
  2395. bfqq->wr_cur_max_time = 0;
  2396. bfqq->last_wr_start_finish = jiffies;
  2397. /*
  2398. * Trigger a weight change on the next invocation of
  2399. * __bfq_entity_update_weight_prio.
  2400. */
  2401. bfqq->entity.prio_changed = 1;
  2402. }
  2403. void bfq_end_wr_async_queues(struct bfq_data *bfqd,
  2404. struct bfq_group *bfqg)
  2405. {
  2406. int i, j, k;
  2407. for (k = 0; k < bfqd->num_actuators; k++) {
  2408. for (i = 0; i < 2; i++)
  2409. for (j = 0; j < IOPRIO_NR_LEVELS; j++)
  2410. if (bfqg->async_bfqq[i][j][k])
  2411. bfq_bfqq_end_wr(bfqg->async_bfqq[i][j][k]);
  2412. if (bfqg->async_idle_bfqq[k])
  2413. bfq_bfqq_end_wr(bfqg->async_idle_bfqq[k]);
  2414. }
  2415. }
  2416. static void bfq_end_wr(struct bfq_data *bfqd)
  2417. {
  2418. struct bfq_queue *bfqq;
  2419. int i;
  2420. spin_lock_irq(&bfqd->lock);
  2421. for (i = 0; i < bfqd->num_actuators; i++) {
  2422. list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
  2423. bfq_bfqq_end_wr(bfqq);
  2424. }
  2425. list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
  2426. bfq_bfqq_end_wr(bfqq);
  2427. bfq_end_wr_async(bfqd);
  2428. spin_unlock_irq(&bfqd->lock);
  2429. }
  2430. static sector_t bfq_io_struct_pos(void *io_struct, bool request)
  2431. {
  2432. if (request)
  2433. return blk_rq_pos(io_struct);
  2434. else
  2435. return ((struct bio *)io_struct)->bi_iter.bi_sector;
  2436. }
  2437. static int bfq_rq_close_to_sector(void *io_struct, bool request,
  2438. sector_t sector)
  2439. {
  2440. return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
  2441. BFQQ_CLOSE_THR;
  2442. }
  2443. static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
  2444. struct bfq_queue *bfqq,
  2445. sector_t sector)
  2446. {
  2447. struct rb_root *root = &bfqq_group(bfqq)->rq_pos_tree;
  2448. struct rb_node *parent, *node;
  2449. struct bfq_queue *__bfqq;
  2450. if (RB_EMPTY_ROOT(root))
  2451. return NULL;
  2452. /*
  2453. * First, if we find a request starting at the end of the last
  2454. * request, choose it.
  2455. */
  2456. __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
  2457. if (__bfqq)
  2458. return __bfqq;
  2459. /*
  2460. * If the exact sector wasn't found, the parent of the NULL leaf
  2461. * will contain the closest sector (rq_pos_tree sorted by
  2462. * next_request position).
  2463. */
  2464. __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
  2465. if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
  2466. return __bfqq;
  2467. if (blk_rq_pos(__bfqq->next_rq) < sector)
  2468. node = rb_next(&__bfqq->pos_node);
  2469. else
  2470. node = rb_prev(&__bfqq->pos_node);
  2471. if (!node)
  2472. return NULL;
  2473. __bfqq = rb_entry(node, struct bfq_queue, pos_node);
  2474. if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
  2475. return __bfqq;
  2476. return NULL;
  2477. }
  2478. static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
  2479. struct bfq_queue *cur_bfqq,
  2480. sector_t sector)
  2481. {
  2482. struct bfq_queue *bfqq;
  2483. /*
  2484. * We shall notice if some of the queues are cooperating,
  2485. * e.g., working closely on the same area of the device. In
  2486. * that case, we can group them together and: 1) don't waste
  2487. * time idling, and 2) serve the union of their requests in
  2488. * the best possible order for throughput.
  2489. */
  2490. bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
  2491. if (!bfqq || bfqq == cur_bfqq)
  2492. return NULL;
  2493. return bfqq;
  2494. }
  2495. static struct bfq_queue *
  2496. bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
  2497. {
  2498. int process_refs, new_process_refs;
  2499. struct bfq_queue *__bfqq;
  2500. /*
  2501. * If there are no process references on the new_bfqq, then it is
  2502. * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
  2503. * may have dropped their last reference (not just their last process
  2504. * reference).
  2505. */
  2506. if (!bfqq_process_refs(new_bfqq))
  2507. return NULL;
  2508. /* Avoid a circular list and skip interim queue merges. */
  2509. while ((__bfqq = new_bfqq->new_bfqq)) {
  2510. if (__bfqq == bfqq)
  2511. return NULL;
  2512. new_bfqq = __bfqq;
  2513. }
  2514. process_refs = bfqq_process_refs(bfqq);
  2515. new_process_refs = bfqq_process_refs(new_bfqq);
  2516. /*
  2517. * If the process for the bfqq has gone away, there is no
  2518. * sense in merging the queues.
  2519. */
  2520. if (process_refs == 0 || new_process_refs == 0)
  2521. return NULL;
  2522. /*
  2523. * Make sure merged queues belong to the same parent. Parents could
  2524. * have changed since the time we decided the two queues are suitable
  2525. * for merging.
  2526. */
  2527. if (new_bfqq->entity.parent != bfqq->entity.parent)
  2528. return NULL;
  2529. bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
  2530. new_bfqq->pid);
  2531. /*
  2532. * Merging is just a redirection: the requests of the process
  2533. * owning one of the two queues are redirected to the other queue.
  2534. * The latter queue, in its turn, is set as shared if this is the
  2535. * first time that the requests of some process are redirected to
  2536. * it.
  2537. *
  2538. * We redirect bfqq to new_bfqq and not the opposite, because
  2539. * we are in the context of the process owning bfqq, thus we
  2540. * have the io_cq of this process. So we can immediately
  2541. * configure this io_cq to redirect the requests of the
  2542. * process to new_bfqq. In contrast, the io_cq of new_bfqq is
  2543. * not available any more (new_bfqq->bic == NULL).
  2544. *
  2545. * Anyway, even in case new_bfqq coincides with the in-service
  2546. * queue, redirecting requests the in-service queue is the
  2547. * best option, as we feed the in-service queue with new
  2548. * requests close to the last request served and, by doing so,
  2549. * are likely to increase the throughput.
  2550. */
  2551. bfqq->new_bfqq = new_bfqq;
  2552. /*
  2553. * The above assignment schedules the following redirections:
  2554. * each time some I/O for bfqq arrives, the process that
  2555. * generated that I/O is disassociated from bfqq and
  2556. * associated with new_bfqq. Here we increases new_bfqq->ref
  2557. * in advance, adding the number of processes that are
  2558. * expected to be associated with new_bfqq as they happen to
  2559. * issue I/O.
  2560. */
  2561. new_bfqq->ref += process_refs;
  2562. return new_bfqq;
  2563. }
  2564. static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
  2565. struct bfq_queue *new_bfqq)
  2566. {
  2567. if (bfq_too_late_for_merging(new_bfqq))
  2568. return false;
  2569. if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
  2570. (bfqq->ioprio_class != new_bfqq->ioprio_class))
  2571. return false;
  2572. /*
  2573. * If either of the queues has already been detected as seeky,
  2574. * then merging it with the other queue is unlikely to lead to
  2575. * sequential I/O.
  2576. */
  2577. if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
  2578. return false;
  2579. /*
  2580. * Interleaved I/O is known to be done by (some) applications
  2581. * only for reads, so it does not make sense to merge async
  2582. * queues.
  2583. */
  2584. if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
  2585. return false;
  2586. return true;
  2587. }
  2588. static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
  2589. struct bfq_queue *bfqq);
  2590. static struct bfq_queue *
  2591. bfq_setup_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  2592. struct bfq_queue *stable_merge_bfqq,
  2593. struct bfq_iocq_bfqq_data *bfqq_data)
  2594. {
  2595. int proc_ref = min(bfqq_process_refs(bfqq),
  2596. bfqq_process_refs(stable_merge_bfqq));
  2597. struct bfq_queue *new_bfqq = NULL;
  2598. bfqq_data->stable_merge_bfqq = NULL;
  2599. if (idling_boosts_thr_without_issues(bfqd, bfqq) || proc_ref == 0)
  2600. goto out;
  2601. /* next function will take at least one ref */
  2602. new_bfqq = bfq_setup_merge(bfqq, stable_merge_bfqq);
  2603. if (new_bfqq) {
  2604. bfqq_data->stably_merged = true;
  2605. if (new_bfqq->bic) {
  2606. unsigned int new_a_idx = new_bfqq->actuator_idx;
  2607. struct bfq_iocq_bfqq_data *new_bfqq_data =
  2608. &new_bfqq->bic->bfqq_data[new_a_idx];
  2609. new_bfqq_data->stably_merged = true;
  2610. }
  2611. }
  2612. out:
  2613. /* deschedule stable merge, because done or aborted here */
  2614. bfq_put_stable_ref(stable_merge_bfqq);
  2615. return new_bfqq;
  2616. }
  2617. /*
  2618. * Attempt to schedule a merge of bfqq with the currently in-service
  2619. * queue or with a close queue among the scheduled queues. Return
  2620. * NULL if no merge was scheduled, a pointer to the shared bfq_queue
  2621. * structure otherwise.
  2622. *
  2623. * The OOM queue is not allowed to participate to cooperation: in fact, since
  2624. * the requests temporarily redirected to the OOM queue could be redirected
  2625. * again to dedicated queues at any time, the state needed to correctly
  2626. * handle merging with the OOM queue would be quite complex and expensive
  2627. * to maintain. Besides, in such a critical condition as an out of memory,
  2628. * the benefits of queue merging may be little relevant, or even negligible.
  2629. *
  2630. * WARNING: queue merging may impair fairness among non-weight raised
  2631. * queues, for at least two reasons: 1) the original weight of a
  2632. * merged queue may change during the merged state, 2) even being the
  2633. * weight the same, a merged queue may be bloated with many more
  2634. * requests than the ones produced by its originally-associated
  2635. * process.
  2636. */
  2637. static struct bfq_queue *
  2638. bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  2639. void *io_struct, bool request, struct bfq_io_cq *bic)
  2640. {
  2641. struct bfq_queue *in_service_bfqq, *new_bfqq;
  2642. unsigned int a_idx = bfqq->actuator_idx;
  2643. struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
  2644. /* if a merge has already been setup, then proceed with that first */
  2645. new_bfqq = bfqq->new_bfqq;
  2646. if (new_bfqq) {
  2647. while (new_bfqq->new_bfqq)
  2648. new_bfqq = new_bfqq->new_bfqq;
  2649. return new_bfqq;
  2650. }
  2651. /*
  2652. * Check delayed stable merge for rotational or non-queueing
  2653. * devs. For this branch to be executed, bfqq must not be
  2654. * currently merged with some other queue (i.e., bfqq->bic
  2655. * must be non null). If we considered also merged queues,
  2656. * then we should also check whether bfqq has already been
  2657. * merged with bic->stable_merge_bfqq. But this would be
  2658. * costly and complicated.
  2659. */
  2660. if (unlikely(!bfqd->nonrot_with_queueing)) {
  2661. /*
  2662. * Make sure also that bfqq is sync, because
  2663. * bic->stable_merge_bfqq may point to some queue (for
  2664. * stable merging) also if bic is associated with a
  2665. * sync queue, but this bfqq is async
  2666. */
  2667. if (bfq_bfqq_sync(bfqq) && bfqq_data->stable_merge_bfqq &&
  2668. !bfq_bfqq_just_created(bfqq) &&
  2669. time_is_before_jiffies(bfqq->split_time +
  2670. msecs_to_jiffies(bfq_late_stable_merging)) &&
  2671. time_is_before_jiffies(bfqq->creation_time +
  2672. msecs_to_jiffies(bfq_late_stable_merging))) {
  2673. struct bfq_queue *stable_merge_bfqq =
  2674. bfqq_data->stable_merge_bfqq;
  2675. return bfq_setup_stable_merge(bfqd, bfqq,
  2676. stable_merge_bfqq,
  2677. bfqq_data);
  2678. }
  2679. }
  2680. /*
  2681. * Do not perform queue merging if the device is non
  2682. * rotational and performs internal queueing. In fact, such a
  2683. * device reaches a high speed through internal parallelism
  2684. * and pipelining. This means that, to reach a high
  2685. * throughput, it must have many requests enqueued at the same
  2686. * time. But, in this configuration, the internal scheduling
  2687. * algorithm of the device does exactly the job of queue
  2688. * merging: it reorders requests so as to obtain as much as
  2689. * possible a sequential I/O pattern. As a consequence, with
  2690. * the workload generated by processes doing interleaved I/O,
  2691. * the throughput reached by the device is likely to be the
  2692. * same, with and without queue merging.
  2693. *
  2694. * Disabling merging also provides a remarkable benefit in
  2695. * terms of throughput. Merging tends to make many workloads
  2696. * artificially more uneven, because of shared queues
  2697. * remaining non empty for incomparably more time than
  2698. * non-merged queues. This may accentuate workload
  2699. * asymmetries. For example, if one of the queues in a set of
  2700. * merged queues has a higher weight than a normal queue, then
  2701. * the shared queue may inherit such a high weight and, by
  2702. * staying almost always active, may force BFQ to perform I/O
  2703. * plugging most of the time. This evidently makes it harder
  2704. * for BFQ to let the device reach a high throughput.
  2705. *
  2706. * Finally, the likely() macro below is not used because one
  2707. * of the two branches is more likely than the other, but to
  2708. * have the code path after the following if() executed as
  2709. * fast as possible for the case of a non rotational device
  2710. * with queueing. We want it because this is the fastest kind
  2711. * of device. On the opposite end, the likely() may lengthen
  2712. * the execution time of BFQ for the case of slower devices
  2713. * (rotational or at least without queueing). But in this case
  2714. * the execution time of BFQ matters very little, if not at
  2715. * all.
  2716. */
  2717. if (likely(bfqd->nonrot_with_queueing))
  2718. return NULL;
  2719. /*
  2720. * Prevent bfqq from being merged if it has been created too
  2721. * long ago. The idea is that true cooperating processes, and
  2722. * thus their associated bfq_queues, are supposed to be
  2723. * created shortly after each other. This is the case, e.g.,
  2724. * for KVM/QEMU and dump I/O threads. Basing on this
  2725. * assumption, the following filtering greatly reduces the
  2726. * probability that two non-cooperating processes, which just
  2727. * happen to do close I/O for some short time interval, have
  2728. * their queues merged by mistake.
  2729. */
  2730. if (bfq_too_late_for_merging(bfqq))
  2731. return NULL;
  2732. if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
  2733. return NULL;
  2734. /* If there is only one backlogged queue, don't search. */
  2735. if (bfq_tot_busy_queues(bfqd) == 1)
  2736. return NULL;
  2737. in_service_bfqq = bfqd->in_service_queue;
  2738. if (in_service_bfqq && in_service_bfqq != bfqq &&
  2739. likely(in_service_bfqq != &bfqd->oom_bfqq) &&
  2740. bfq_rq_close_to_sector(io_struct, request,
  2741. bfqd->in_serv_last_pos) &&
  2742. bfqq->entity.parent == in_service_bfqq->entity.parent &&
  2743. bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
  2744. new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
  2745. if (new_bfqq)
  2746. return new_bfqq;
  2747. }
  2748. /*
  2749. * Check whether there is a cooperator among currently scheduled
  2750. * queues. The only thing we need is that the bio/request is not
  2751. * NULL, as we need it to establish whether a cooperator exists.
  2752. */
  2753. new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
  2754. bfq_io_struct_pos(io_struct, request));
  2755. if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
  2756. bfq_may_be_close_cooperator(bfqq, new_bfqq))
  2757. return bfq_setup_merge(bfqq, new_bfqq);
  2758. return NULL;
  2759. }
  2760. static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
  2761. {
  2762. struct bfq_io_cq *bic = bfqq->bic;
  2763. unsigned int a_idx = bfqq->actuator_idx;
  2764. struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
  2765. /*
  2766. * If !bfqq->bic, the queue is already shared or its requests
  2767. * have already been redirected to a shared queue; both idle window
  2768. * and weight raising state have already been saved. Do nothing.
  2769. */
  2770. if (!bic)
  2771. return;
  2772. bfqq_data->saved_last_serv_time_ns = bfqq->last_serv_time_ns;
  2773. bfqq_data->saved_inject_limit = bfqq->inject_limit;
  2774. bfqq_data->saved_decrease_time_jif = bfqq->decrease_time_jif;
  2775. bfqq_data->saved_weight = bfqq->entity.orig_weight;
  2776. bfqq_data->saved_ttime = bfqq->ttime;
  2777. bfqq_data->saved_has_short_ttime =
  2778. bfq_bfqq_has_short_ttime(bfqq);
  2779. bfqq_data->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
  2780. bfqq_data->saved_io_start_time = bfqq->io_start_time;
  2781. bfqq_data->saved_tot_idle_time = bfqq->tot_idle_time;
  2782. bfqq_data->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
  2783. bfqq_data->was_in_burst_list =
  2784. !hlist_unhashed(&bfqq->burst_list_node);
  2785. if (unlikely(bfq_bfqq_just_created(bfqq) &&
  2786. !bfq_bfqq_in_large_burst(bfqq) &&
  2787. bfqq->bfqd->low_latency)) {
  2788. /*
  2789. * bfqq being merged right after being created: bfqq
  2790. * would have deserved interactive weight raising, but
  2791. * did not make it to be set in a weight-raised state,
  2792. * because of this early merge. Store directly the
  2793. * weight-raising state that would have been assigned
  2794. * to bfqq, so that to avoid that bfqq unjustly fails
  2795. * to enjoy weight raising if split soon.
  2796. */
  2797. bfqq_data->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
  2798. bfqq_data->saved_wr_start_at_switch_to_srt =
  2799. bfq_smallest_from_now();
  2800. bfqq_data->saved_wr_cur_max_time =
  2801. bfq_wr_duration(bfqq->bfqd);
  2802. bfqq_data->saved_last_wr_start_finish = jiffies;
  2803. } else {
  2804. bfqq_data->saved_wr_coeff = bfqq->wr_coeff;
  2805. bfqq_data->saved_wr_start_at_switch_to_srt =
  2806. bfqq->wr_start_at_switch_to_srt;
  2807. bfqq_data->saved_service_from_wr =
  2808. bfqq->service_from_wr;
  2809. bfqq_data->saved_last_wr_start_finish =
  2810. bfqq->last_wr_start_finish;
  2811. bfqq_data->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
  2812. }
  2813. }
  2814. void bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq,
  2815. struct bfq_queue *new_bfqq)
  2816. {
  2817. if (cur_bfqq->entity.parent &&
  2818. cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq)
  2819. cur_bfqq->entity.parent->last_bfqq_created = new_bfqq;
  2820. else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq)
  2821. cur_bfqq->bfqd->last_bfqq_created = new_bfqq;
  2822. }
  2823. void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  2824. {
  2825. /*
  2826. * To prevent bfqq's service guarantees from being violated,
  2827. * bfqq may be left busy, i.e., queued for service, even if
  2828. * empty (see comments in __bfq_bfqq_expire() for
  2829. * details). But, if no process will send requests to bfqq any
  2830. * longer, then there is no point in keeping bfqq queued for
  2831. * service. In addition, keeping bfqq queued for service, but
  2832. * with no process ref any longer, may have caused bfqq to be
  2833. * freed when dequeued from service. But this is assumed to
  2834. * never happen.
  2835. */
  2836. if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
  2837. bfqq != bfqd->in_service_queue)
  2838. bfq_del_bfqq_busy(bfqq, false);
  2839. bfq_reassign_last_bfqq(bfqq, NULL);
  2840. bfq_put_queue(bfqq);
  2841. }
  2842. static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
  2843. struct bfq_io_cq *bic,
  2844. struct bfq_queue *bfqq)
  2845. {
  2846. struct bfq_queue *new_bfqq = bfqq->new_bfqq;
  2847. bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
  2848. (unsigned long)new_bfqq->pid);
  2849. /* Save weight raising and idle window of the merged queues */
  2850. bfq_bfqq_save_state(bfqq);
  2851. bfq_bfqq_save_state(new_bfqq);
  2852. if (bfq_bfqq_IO_bound(bfqq))
  2853. bfq_mark_bfqq_IO_bound(new_bfqq);
  2854. bfq_clear_bfqq_IO_bound(bfqq);
  2855. /*
  2856. * The processes associated with bfqq are cooperators of the
  2857. * processes associated with new_bfqq. So, if bfqq has a
  2858. * waker, then assume that all these processes will be happy
  2859. * to let bfqq's waker freely inject I/O when they have no
  2860. * I/O.
  2861. */
  2862. if (bfqq->waker_bfqq && !new_bfqq->waker_bfqq &&
  2863. bfqq->waker_bfqq != new_bfqq) {
  2864. new_bfqq->waker_bfqq = bfqq->waker_bfqq;
  2865. new_bfqq->tentative_waker_bfqq = NULL;
  2866. /*
  2867. * If the waker queue disappears, then
  2868. * new_bfqq->waker_bfqq must be reset. So insert
  2869. * new_bfqq into the woken_list of the waker. See
  2870. * bfq_check_waker for details.
  2871. */
  2872. hlist_add_head(&new_bfqq->woken_list_node,
  2873. &new_bfqq->waker_bfqq->woken_list);
  2874. }
  2875. /*
  2876. * If bfqq is weight-raised, then let new_bfqq inherit
  2877. * weight-raising. To reduce false positives, neglect the case
  2878. * where bfqq has just been created, but has not yet made it
  2879. * to be weight-raised (which may happen because EQM may merge
  2880. * bfqq even before bfq_add_request is executed for the first
  2881. * time for bfqq). Handling this case would however be very
  2882. * easy, thanks to the flag just_created.
  2883. */
  2884. if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
  2885. new_bfqq->wr_coeff = bfqq->wr_coeff;
  2886. new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
  2887. new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
  2888. new_bfqq->wr_start_at_switch_to_srt =
  2889. bfqq->wr_start_at_switch_to_srt;
  2890. if (bfq_bfqq_busy(new_bfqq))
  2891. bfqd->wr_busy_queues++;
  2892. new_bfqq->entity.prio_changed = 1;
  2893. }
  2894. if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
  2895. bfqq->wr_coeff = 1;
  2896. bfqq->entity.prio_changed = 1;
  2897. if (bfq_bfqq_busy(bfqq))
  2898. bfqd->wr_busy_queues--;
  2899. }
  2900. bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
  2901. bfqd->wr_busy_queues);
  2902. /*
  2903. * Merge queues (that is, let bic redirect its requests to new_bfqq)
  2904. */
  2905. bic_set_bfqq(bic, new_bfqq, true, bfqq->actuator_idx);
  2906. bfq_mark_bfqq_coop(new_bfqq);
  2907. /*
  2908. * new_bfqq now belongs to at least two bics (it is a shared queue):
  2909. * set new_bfqq->bic to NULL. bfqq either:
  2910. * - does not belong to any bic any more, and hence bfqq->bic must
  2911. * be set to NULL, or
  2912. * - is a queue whose owning bics have already been redirected to a
  2913. * different queue, hence the queue is destined to not belong to
  2914. * any bic soon and bfqq->bic is already NULL (therefore the next
  2915. * assignment causes no harm).
  2916. */
  2917. new_bfqq->bic = NULL;
  2918. /*
  2919. * If the queue is shared, the pid is the pid of one of the associated
  2920. * processes. Which pid depends on the exact sequence of merge events
  2921. * the queue underwent. So printing such a pid is useless and confusing
  2922. * because it reports a random pid between those of the associated
  2923. * processes.
  2924. * We mark such a queue with a pid -1, and then print SHARED instead of
  2925. * a pid in logging messages.
  2926. */
  2927. new_bfqq->pid = -1;
  2928. bfqq->bic = NULL;
  2929. bfq_reassign_last_bfqq(bfqq, new_bfqq);
  2930. bfq_release_process_ref(bfqd, bfqq);
  2931. return new_bfqq;
  2932. }
  2933. static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
  2934. struct bio *bio)
  2935. {
  2936. struct bfq_data *bfqd = q->elevator->elevator_data;
  2937. bool is_sync = op_is_sync(bio->bi_opf);
  2938. struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
  2939. /*
  2940. * Disallow merge of a sync bio into an async request.
  2941. */
  2942. if (is_sync && !rq_is_sync(rq))
  2943. return false;
  2944. /*
  2945. * Lookup the bfqq that this bio will be queued with. Allow
  2946. * merge only if rq is queued there.
  2947. */
  2948. if (!bfqq)
  2949. return false;
  2950. /*
  2951. * We take advantage of this function to perform an early merge
  2952. * of the queues of possible cooperating processes.
  2953. */
  2954. new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
  2955. if (new_bfqq) {
  2956. /*
  2957. * bic still points to bfqq, then it has not yet been
  2958. * redirected to some other bfq_queue, and a queue
  2959. * merge between bfqq and new_bfqq can be safely
  2960. * fulfilled, i.e., bic can be redirected to new_bfqq
  2961. * and bfqq can be put.
  2962. */
  2963. while (bfqq != new_bfqq)
  2964. bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
  2965. /*
  2966. * Change also bqfd->bio_bfqq, as
  2967. * bfqd->bio_bic now points to new_bfqq, and
  2968. * this function may be invoked again (and then may
  2969. * use again bqfd->bio_bfqq).
  2970. */
  2971. bfqd->bio_bfqq = bfqq;
  2972. }
  2973. return bfqq == RQ_BFQQ(rq);
  2974. }
  2975. /*
  2976. * Set the maximum time for the in-service queue to consume its
  2977. * budget. This prevents seeky processes from lowering the throughput.
  2978. * In practice, a time-slice service scheme is used with seeky
  2979. * processes.
  2980. */
  2981. static void bfq_set_budget_timeout(struct bfq_data *bfqd,
  2982. struct bfq_queue *bfqq)
  2983. {
  2984. unsigned int timeout_coeff;
  2985. if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
  2986. timeout_coeff = 1;
  2987. else
  2988. timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
  2989. bfqd->last_budget_start = blk_time_get();
  2990. bfqq->budget_timeout = jiffies +
  2991. bfqd->bfq_timeout * timeout_coeff;
  2992. }
  2993. static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
  2994. struct bfq_queue *bfqq)
  2995. {
  2996. if (bfqq) {
  2997. bfq_clear_bfqq_fifo_expire(bfqq);
  2998. bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
  2999. if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
  3000. bfqq->wr_coeff > 1 &&
  3001. bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  3002. time_is_before_jiffies(bfqq->budget_timeout)) {
  3003. /*
  3004. * For soft real-time queues, move the start
  3005. * of the weight-raising period forward by the
  3006. * time the queue has not received any
  3007. * service. Otherwise, a relatively long
  3008. * service delay is likely to cause the
  3009. * weight-raising period of the queue to end,
  3010. * because of the short duration of the
  3011. * weight-raising period of a soft real-time
  3012. * queue. It is worth noting that this move
  3013. * is not so dangerous for the other queues,
  3014. * because soft real-time queues are not
  3015. * greedy.
  3016. *
  3017. * To not add a further variable, we use the
  3018. * overloaded field budget_timeout to
  3019. * determine for how long the queue has not
  3020. * received service, i.e., how much time has
  3021. * elapsed since the queue expired. However,
  3022. * this is a little imprecise, because
  3023. * budget_timeout is set to jiffies if bfqq
  3024. * not only expires, but also remains with no
  3025. * request.
  3026. */
  3027. if (time_after(bfqq->budget_timeout,
  3028. bfqq->last_wr_start_finish))
  3029. bfqq->last_wr_start_finish +=
  3030. jiffies - bfqq->budget_timeout;
  3031. else
  3032. bfqq->last_wr_start_finish = jiffies;
  3033. }
  3034. bfq_set_budget_timeout(bfqd, bfqq);
  3035. bfq_log_bfqq(bfqd, bfqq,
  3036. "set_in_service_queue, cur-budget = %d",
  3037. bfqq->entity.budget);
  3038. }
  3039. bfqd->in_service_queue = bfqq;
  3040. bfqd->in_serv_last_pos = 0;
  3041. }
  3042. /*
  3043. * Get and set a new queue for service.
  3044. */
  3045. static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
  3046. {
  3047. struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
  3048. __bfq_set_in_service_queue(bfqd, bfqq);
  3049. return bfqq;
  3050. }
  3051. static void bfq_arm_slice_timer(struct bfq_data *bfqd)
  3052. {
  3053. struct bfq_queue *bfqq = bfqd->in_service_queue;
  3054. u32 sl;
  3055. bfq_mark_bfqq_wait_request(bfqq);
  3056. /*
  3057. * We don't want to idle for seeks, but we do want to allow
  3058. * fair distribution of slice time for a process doing back-to-back
  3059. * seeks. So allow a little bit of time for him to submit a new rq.
  3060. */
  3061. sl = bfqd->bfq_slice_idle;
  3062. /*
  3063. * Unless the queue is being weight-raised or the scenario is
  3064. * asymmetric, grant only minimum idle time if the queue
  3065. * is seeky. A long idling is preserved for a weight-raised
  3066. * queue, or, more in general, in an asymmetric scenario,
  3067. * because a long idling is needed for guaranteeing to a queue
  3068. * its reserved share of the throughput (in particular, it is
  3069. * needed if the queue has a higher weight than some other
  3070. * queue).
  3071. */
  3072. if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
  3073. !bfq_asymmetric_scenario(bfqd, bfqq))
  3074. sl = min_t(u64, sl, BFQ_MIN_TT);
  3075. else if (bfqq->wr_coeff > 1)
  3076. sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
  3077. bfqd->last_idling_start = blk_time_get();
  3078. bfqd->last_idling_start_jiffies = jiffies;
  3079. hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
  3080. HRTIMER_MODE_REL);
  3081. bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
  3082. }
  3083. /*
  3084. * In autotuning mode, max_budget is dynamically recomputed as the
  3085. * amount of sectors transferred in timeout at the estimated peak
  3086. * rate. This enables BFQ to utilize a full timeslice with a full
  3087. * budget, even if the in-service queue is served at peak rate. And
  3088. * this maximises throughput with sequential workloads.
  3089. */
  3090. static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
  3091. {
  3092. return (u64)bfqd->peak_rate * USEC_PER_MSEC *
  3093. jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
  3094. }
  3095. /*
  3096. * Update parameters related to throughput and responsiveness, as a
  3097. * function of the estimated peak rate. See comments on
  3098. * bfq_calc_max_budget(), and on the ref_wr_duration array.
  3099. */
  3100. static void update_thr_responsiveness_params(struct bfq_data *bfqd)
  3101. {
  3102. if (bfqd->bfq_user_max_budget == 0) {
  3103. bfqd->bfq_max_budget =
  3104. bfq_calc_max_budget(bfqd);
  3105. bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
  3106. }
  3107. }
  3108. static void bfq_reset_rate_computation(struct bfq_data *bfqd,
  3109. struct request *rq)
  3110. {
  3111. if (rq != NULL) { /* new rq dispatch now, reset accordingly */
  3112. bfqd->last_dispatch = bfqd->first_dispatch = blk_time_get_ns();
  3113. bfqd->peak_rate_samples = 1;
  3114. bfqd->sequential_samples = 0;
  3115. bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
  3116. blk_rq_sectors(rq);
  3117. } else /* no new rq dispatched, just reset the number of samples */
  3118. bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
  3119. bfq_log(bfqd,
  3120. "reset_rate_computation at end, sample %u/%u tot_sects %llu",
  3121. bfqd->peak_rate_samples, bfqd->sequential_samples,
  3122. bfqd->tot_sectors_dispatched);
  3123. }
  3124. static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
  3125. {
  3126. u32 rate, weight, divisor;
  3127. /*
  3128. * For the convergence property to hold (see comments on
  3129. * bfq_update_peak_rate()) and for the assessment to be
  3130. * reliable, a minimum number of samples must be present, and
  3131. * a minimum amount of time must have elapsed. If not so, do
  3132. * not compute new rate. Just reset parameters, to get ready
  3133. * for a new evaluation attempt.
  3134. */
  3135. if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
  3136. bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
  3137. goto reset_computation;
  3138. /*
  3139. * If a new request completion has occurred after last
  3140. * dispatch, then, to approximate the rate at which requests
  3141. * have been served by the device, it is more precise to
  3142. * extend the observation interval to the last completion.
  3143. */
  3144. bfqd->delta_from_first =
  3145. max_t(u64, bfqd->delta_from_first,
  3146. bfqd->last_completion - bfqd->first_dispatch);
  3147. /*
  3148. * Rate computed in sects/usec, and not sects/nsec, for
  3149. * precision issues.
  3150. */
  3151. rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
  3152. div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
  3153. /*
  3154. * Peak rate not updated if:
  3155. * - the percentage of sequential dispatches is below 3/4 of the
  3156. * total, and rate is below the current estimated peak rate
  3157. * - rate is unreasonably high (> 20M sectors/sec)
  3158. */
  3159. if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
  3160. rate <= bfqd->peak_rate) ||
  3161. rate > 20<<BFQ_RATE_SHIFT)
  3162. goto reset_computation;
  3163. /*
  3164. * We have to update the peak rate, at last! To this purpose,
  3165. * we use a low-pass filter. We compute the smoothing constant
  3166. * of the filter as a function of the 'weight' of the new
  3167. * measured rate.
  3168. *
  3169. * As can be seen in next formulas, we define this weight as a
  3170. * quantity proportional to how sequential the workload is,
  3171. * and to how long the observation time interval is.
  3172. *
  3173. * The weight runs from 0 to 8. The maximum value of the
  3174. * weight, 8, yields the minimum value for the smoothing
  3175. * constant. At this minimum value for the smoothing constant,
  3176. * the measured rate contributes for half of the next value of
  3177. * the estimated peak rate.
  3178. *
  3179. * So, the first step is to compute the weight as a function
  3180. * of how sequential the workload is. Note that the weight
  3181. * cannot reach 9, because bfqd->sequential_samples cannot
  3182. * become equal to bfqd->peak_rate_samples, which, in its
  3183. * turn, holds true because bfqd->sequential_samples is not
  3184. * incremented for the first sample.
  3185. */
  3186. weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
  3187. /*
  3188. * Second step: further refine the weight as a function of the
  3189. * duration of the observation interval.
  3190. */
  3191. weight = min_t(u32, 8,
  3192. div_u64(weight * bfqd->delta_from_first,
  3193. BFQ_RATE_REF_INTERVAL));
  3194. /*
  3195. * Divisor ranging from 10, for minimum weight, to 2, for
  3196. * maximum weight.
  3197. */
  3198. divisor = 10 - weight;
  3199. /*
  3200. * Finally, update peak rate:
  3201. *
  3202. * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
  3203. */
  3204. bfqd->peak_rate *= divisor-1;
  3205. bfqd->peak_rate /= divisor;
  3206. rate /= divisor; /* smoothing constant alpha = 1/divisor */
  3207. bfqd->peak_rate += rate;
  3208. /*
  3209. * For a very slow device, bfqd->peak_rate can reach 0 (see
  3210. * the minimum representable values reported in the comments
  3211. * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
  3212. * divisions by zero where bfqd->peak_rate is used as a
  3213. * divisor.
  3214. */
  3215. bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
  3216. update_thr_responsiveness_params(bfqd);
  3217. reset_computation:
  3218. bfq_reset_rate_computation(bfqd, rq);
  3219. }
  3220. /*
  3221. * Update the read/write peak rate (the main quantity used for
  3222. * auto-tuning, see update_thr_responsiveness_params()).
  3223. *
  3224. * It is not trivial to estimate the peak rate (correctly): because of
  3225. * the presence of sw and hw queues between the scheduler and the
  3226. * device components that finally serve I/O requests, it is hard to
  3227. * say exactly when a given dispatched request is served inside the
  3228. * device, and for how long. As a consequence, it is hard to know
  3229. * precisely at what rate a given set of requests is actually served
  3230. * by the device.
  3231. *
  3232. * On the opposite end, the dispatch time of any request is trivially
  3233. * available, and, from this piece of information, the "dispatch rate"
  3234. * of requests can be immediately computed. So, the idea in the next
  3235. * function is to use what is known, namely request dispatch times
  3236. * (plus, when useful, request completion times), to estimate what is
  3237. * unknown, namely in-device request service rate.
  3238. *
  3239. * The main issue is that, because of the above facts, the rate at
  3240. * which a certain set of requests is dispatched over a certain time
  3241. * interval can vary greatly with respect to the rate at which the
  3242. * same requests are then served. But, since the size of any
  3243. * intermediate queue is limited, and the service scheme is lossless
  3244. * (no request is silently dropped), the following obvious convergence
  3245. * property holds: the number of requests dispatched MUST become
  3246. * closer and closer to the number of requests completed as the
  3247. * observation interval grows. This is the key property used in
  3248. * the next function to estimate the peak service rate as a function
  3249. * of the observed dispatch rate. The function assumes to be invoked
  3250. * on every request dispatch.
  3251. */
  3252. static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
  3253. {
  3254. u64 now_ns = blk_time_get_ns();
  3255. if (bfqd->peak_rate_samples == 0) { /* first dispatch */
  3256. bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
  3257. bfqd->peak_rate_samples);
  3258. bfq_reset_rate_computation(bfqd, rq);
  3259. goto update_last_values; /* will add one sample */
  3260. }
  3261. /*
  3262. * Device idle for very long: the observation interval lasting
  3263. * up to this dispatch cannot be a valid observation interval
  3264. * for computing a new peak rate (similarly to the late-
  3265. * completion event in bfq_completed_request()). Go to
  3266. * update_rate_and_reset to have the following three steps
  3267. * taken:
  3268. * - close the observation interval at the last (previous)
  3269. * request dispatch or completion
  3270. * - compute rate, if possible, for that observation interval
  3271. * - start a new observation interval with this dispatch
  3272. */
  3273. if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
  3274. bfqd->tot_rq_in_driver == 0)
  3275. goto update_rate_and_reset;
  3276. /* Update sampling information */
  3277. bfqd->peak_rate_samples++;
  3278. if ((bfqd->tot_rq_in_driver > 0 ||
  3279. now_ns - bfqd->last_completion < BFQ_MIN_TT)
  3280. && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
  3281. bfqd->sequential_samples++;
  3282. bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
  3283. /* Reset max observed rq size every 32 dispatches */
  3284. if (likely(bfqd->peak_rate_samples % 32))
  3285. bfqd->last_rq_max_size =
  3286. max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
  3287. else
  3288. bfqd->last_rq_max_size = blk_rq_sectors(rq);
  3289. bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
  3290. /* Target observation interval not yet reached, go on sampling */
  3291. if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
  3292. goto update_last_values;
  3293. update_rate_and_reset:
  3294. bfq_update_rate_reset(bfqd, rq);
  3295. update_last_values:
  3296. bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
  3297. if (RQ_BFQQ(rq) == bfqd->in_service_queue)
  3298. bfqd->in_serv_last_pos = bfqd->last_position;
  3299. bfqd->last_dispatch = now_ns;
  3300. }
  3301. /*
  3302. * Remove request from internal lists.
  3303. */
  3304. static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
  3305. {
  3306. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  3307. /*
  3308. * For consistency, the next instruction should have been
  3309. * executed after removing the request from the queue and
  3310. * dispatching it. We execute instead this instruction before
  3311. * bfq_remove_request() (and hence introduce a temporary
  3312. * inconsistency), for efficiency. In fact, should this
  3313. * dispatch occur for a non in-service bfqq, this anticipated
  3314. * increment prevents two counters related to bfqq->dispatched
  3315. * from risking to be, first, uselessly decremented, and then
  3316. * incremented again when the (new) value of bfqq->dispatched
  3317. * happens to be taken into account.
  3318. */
  3319. bfqq->dispatched++;
  3320. bfq_update_peak_rate(q->elevator->elevator_data, rq);
  3321. bfq_remove_request(q, rq);
  3322. }
  3323. /*
  3324. * There is a case where idling does not have to be performed for
  3325. * throughput concerns, but to preserve the throughput share of
  3326. * the process associated with bfqq.
  3327. *
  3328. * To introduce this case, we can note that allowing the drive
  3329. * to enqueue more than one request at a time, and hence
  3330. * delegating de facto final scheduling decisions to the
  3331. * drive's internal scheduler, entails loss of control on the
  3332. * actual request service order. In particular, the critical
  3333. * situation is when requests from different processes happen
  3334. * to be present, at the same time, in the internal queue(s)
  3335. * of the drive. In such a situation, the drive, by deciding
  3336. * the service order of the internally-queued requests, does
  3337. * determine also the actual throughput distribution among
  3338. * these processes. But the drive typically has no notion or
  3339. * concern about per-process throughput distribution, and
  3340. * makes its decisions only on a per-request basis. Therefore,
  3341. * the service distribution enforced by the drive's internal
  3342. * scheduler is likely to coincide with the desired throughput
  3343. * distribution only in a completely symmetric, or favorably
  3344. * skewed scenario where:
  3345. * (i-a) each of these processes must get the same throughput as
  3346. * the others,
  3347. * (i-b) in case (i-a) does not hold, it holds that the process
  3348. * associated with bfqq must receive a lower or equal
  3349. * throughput than any of the other processes;
  3350. * (ii) the I/O of each process has the same properties, in
  3351. * terms of locality (sequential or random), direction
  3352. * (reads or writes), request sizes, greediness
  3353. * (from I/O-bound to sporadic), and so on;
  3354. * In fact, in such a scenario, the drive tends to treat the requests
  3355. * of each process in about the same way as the requests of the
  3356. * others, and thus to provide each of these processes with about the
  3357. * same throughput. This is exactly the desired throughput
  3358. * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
  3359. * even more convenient distribution for (the process associated with)
  3360. * bfqq.
  3361. *
  3362. * In contrast, in any asymmetric or unfavorable scenario, device
  3363. * idling (I/O-dispatch plugging) is certainly needed to guarantee
  3364. * that bfqq receives its assigned fraction of the device throughput
  3365. * (see [1] for details).
  3366. *
  3367. * The problem is that idling may significantly reduce throughput with
  3368. * certain combinations of types of I/O and devices. An important
  3369. * example is sync random I/O on flash storage with command
  3370. * queueing. So, unless bfqq falls in cases where idling also boosts
  3371. * throughput, it is important to check conditions (i-a), i(-b) and
  3372. * (ii) accurately, so as to avoid idling when not strictly needed for
  3373. * service guarantees.
  3374. *
  3375. * Unfortunately, it is extremely difficult to thoroughly check
  3376. * condition (ii). And, in case there are active groups, it becomes
  3377. * very difficult to check conditions (i-a) and (i-b) too. In fact,
  3378. * if there are active groups, then, for conditions (i-a) or (i-b) to
  3379. * become false 'indirectly', it is enough that an active group
  3380. * contains more active processes or sub-groups than some other active
  3381. * group. More precisely, for conditions (i-a) or (i-b) to become
  3382. * false because of such a group, it is not even necessary that the
  3383. * group is (still) active: it is sufficient that, even if the group
  3384. * has become inactive, some of its descendant processes still have
  3385. * some request already dispatched but still waiting for
  3386. * completion. In fact, requests have still to be guaranteed their
  3387. * share of the throughput even after being dispatched. In this
  3388. * respect, it is easy to show that, if a group frequently becomes
  3389. * inactive while still having in-flight requests, and if, when this
  3390. * happens, the group is not considered in the calculation of whether
  3391. * the scenario is asymmetric, then the group may fail to be
  3392. * guaranteed its fair share of the throughput (basically because
  3393. * idling may not be performed for the descendant processes of the
  3394. * group, but it had to be). We address this issue with the following
  3395. * bi-modal behavior, implemented in the function
  3396. * bfq_asymmetric_scenario().
  3397. *
  3398. * If there are groups with requests waiting for completion
  3399. * (as commented above, some of these groups may even be
  3400. * already inactive), then the scenario is tagged as
  3401. * asymmetric, conservatively, without checking any of the
  3402. * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
  3403. * This behavior matches also the fact that groups are created
  3404. * exactly if controlling I/O is a primary concern (to
  3405. * preserve bandwidth and latency guarantees).
  3406. *
  3407. * On the opposite end, if there are no groups with requests waiting
  3408. * for completion, then only conditions (i-a) and (i-b) are actually
  3409. * controlled, i.e., provided that conditions (i-a) or (i-b) holds,
  3410. * idling is not performed, regardless of whether condition (ii)
  3411. * holds. In other words, only if conditions (i-a) and (i-b) do not
  3412. * hold, then idling is allowed, and the device tends to be prevented
  3413. * from queueing many requests, possibly of several processes. Since
  3414. * there are no groups with requests waiting for completion, then, to
  3415. * control conditions (i-a) and (i-b) it is enough to check just
  3416. * whether all the queues with requests waiting for completion also
  3417. * have the same weight.
  3418. *
  3419. * Not checking condition (ii) evidently exposes bfqq to the
  3420. * risk of getting less throughput than its fair share.
  3421. * However, for queues with the same weight, a further
  3422. * mechanism, preemption, mitigates or even eliminates this
  3423. * problem. And it does so without consequences on overall
  3424. * throughput. This mechanism and its benefits are explained
  3425. * in the next three paragraphs.
  3426. *
  3427. * Even if a queue, say Q, is expired when it remains idle, Q
  3428. * can still preempt the new in-service queue if the next
  3429. * request of Q arrives soon (see the comments on
  3430. * bfq_bfqq_update_budg_for_activation). If all queues and
  3431. * groups have the same weight, this form of preemption,
  3432. * combined with the hole-recovery heuristic described in the
  3433. * comments on function bfq_bfqq_update_budg_for_activation,
  3434. * are enough to preserve a correct bandwidth distribution in
  3435. * the mid term, even without idling. In fact, even if not
  3436. * idling allows the internal queues of the device to contain
  3437. * many requests, and thus to reorder requests, we can rather
  3438. * safely assume that the internal scheduler still preserves a
  3439. * minimum of mid-term fairness.
  3440. *
  3441. * More precisely, this preemption-based, idleless approach
  3442. * provides fairness in terms of IOPS, and not sectors per
  3443. * second. This can be seen with a simple example. Suppose
  3444. * that there are two queues with the same weight, but that
  3445. * the first queue receives requests of 8 sectors, while the
  3446. * second queue receives requests of 1024 sectors. In
  3447. * addition, suppose that each of the two queues contains at
  3448. * most one request at a time, which implies that each queue
  3449. * always remains idle after it is served. Finally, after
  3450. * remaining idle, each queue receives very quickly a new
  3451. * request. It follows that the two queues are served
  3452. * alternatively, preempting each other if needed. This
  3453. * implies that, although both queues have the same weight,
  3454. * the queue with large requests receives a service that is
  3455. * 1024/8 times as high as the service received by the other
  3456. * queue.
  3457. *
  3458. * The motivation for using preemption instead of idling (for
  3459. * queues with the same weight) is that, by not idling,
  3460. * service guarantees are preserved (completely or at least in
  3461. * part) without minimally sacrificing throughput. And, if
  3462. * there is no active group, then the primary expectation for
  3463. * this device is probably a high throughput.
  3464. *
  3465. * We are now left only with explaining the two sub-conditions in the
  3466. * additional compound condition that is checked below for deciding
  3467. * whether the scenario is asymmetric. To explain the first
  3468. * sub-condition, we need to add that the function
  3469. * bfq_asymmetric_scenario checks the weights of only
  3470. * non-weight-raised queues, for efficiency reasons (see comments on
  3471. * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
  3472. * is checked explicitly here. More precisely, the compound condition
  3473. * below takes into account also the fact that, even if bfqq is being
  3474. * weight-raised, the scenario is still symmetric if all queues with
  3475. * requests waiting for completion happen to be
  3476. * weight-raised. Actually, we should be even more precise here, and
  3477. * differentiate between interactive weight raising and soft real-time
  3478. * weight raising.
  3479. *
  3480. * The second sub-condition checked in the compound condition is
  3481. * whether there is a fair amount of already in-flight I/O not
  3482. * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
  3483. * following reason. The drive may decide to serve in-flight
  3484. * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
  3485. * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
  3486. * I/O-dispatching is not plugged, then, while bfqq remains empty, a
  3487. * basically uncontrolled amount of I/O from other queues may be
  3488. * dispatched too, possibly causing the service of bfqq's I/O to be
  3489. * delayed even longer in the drive. This problem gets more and more
  3490. * serious as the speed and the queue depth of the drive grow,
  3491. * because, as these two quantities grow, the probability to find no
  3492. * queue busy but many requests in flight grows too. By contrast,
  3493. * plugging I/O dispatching minimizes the delay induced by already
  3494. * in-flight I/O, and enables bfqq to recover the bandwidth it may
  3495. * lose because of this delay.
  3496. *
  3497. * As a side note, it is worth considering that the above
  3498. * device-idling countermeasures may however fail in the following
  3499. * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
  3500. * in a time period during which all symmetry sub-conditions hold, and
  3501. * therefore the device is allowed to enqueue many requests, but at
  3502. * some later point in time some sub-condition stops to hold, then it
  3503. * may become impossible to make requests be served in the desired
  3504. * order until all the requests already queued in the device have been
  3505. * served. The last sub-condition commented above somewhat mitigates
  3506. * this problem for weight-raised queues.
  3507. *
  3508. * However, as an additional mitigation for this problem, we preserve
  3509. * plugging for a special symmetric case that may suddenly turn into
  3510. * asymmetric: the case where only bfqq is busy. In this case, not
  3511. * expiring bfqq does not cause any harm to any other queues in terms
  3512. * of service guarantees. In contrast, it avoids the following unlucky
  3513. * sequence of events: (1) bfqq is expired, (2) a new queue with a
  3514. * lower weight than bfqq becomes busy (or more queues), (3) the new
  3515. * queue is served until a new request arrives for bfqq, (4) when bfqq
  3516. * is finally served, there are so many requests of the new queue in
  3517. * the drive that the pending requests for bfqq take a lot of time to
  3518. * be served. In particular, event (2) may case even already
  3519. * dispatched requests of bfqq to be delayed, inside the drive. So, to
  3520. * avoid this series of events, the scenario is preventively declared
  3521. * as asymmetric also if bfqq is the only busy queues
  3522. */
  3523. static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
  3524. struct bfq_queue *bfqq)
  3525. {
  3526. int tot_busy_queues = bfq_tot_busy_queues(bfqd);
  3527. /* No point in idling for bfqq if it won't get requests any longer */
  3528. if (unlikely(!bfqq_process_refs(bfqq)))
  3529. return false;
  3530. return (bfqq->wr_coeff > 1 &&
  3531. (bfqd->wr_busy_queues < tot_busy_queues ||
  3532. bfqd->tot_rq_in_driver >= bfqq->dispatched + 4)) ||
  3533. bfq_asymmetric_scenario(bfqd, bfqq) ||
  3534. tot_busy_queues == 1;
  3535. }
  3536. static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  3537. enum bfqq_expiration reason)
  3538. {
  3539. /*
  3540. * If this bfqq is shared between multiple processes, check
  3541. * to make sure that those processes are still issuing I/Os
  3542. * within the mean seek distance. If not, it may be time to
  3543. * break the queues apart again.
  3544. */
  3545. if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
  3546. bfq_mark_bfqq_split_coop(bfqq);
  3547. /*
  3548. * Consider queues with a higher finish virtual time than
  3549. * bfqq. If idling_needed_for_service_guarantees(bfqq) returns
  3550. * true, then bfqq's bandwidth would be violated if an
  3551. * uncontrolled amount of I/O from these queues were
  3552. * dispatched while bfqq is waiting for its new I/O to
  3553. * arrive. This is exactly what may happen if this is a forced
  3554. * expiration caused by a preemption attempt, and if bfqq is
  3555. * not re-scheduled. To prevent this from happening, re-queue
  3556. * bfqq if it needs I/O-dispatch plugging, even if it is
  3557. * empty. By doing so, bfqq is granted to be served before the
  3558. * above queues (provided that bfqq is of course eligible).
  3559. */
  3560. if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
  3561. !(reason == BFQQE_PREEMPTED &&
  3562. idling_needed_for_service_guarantees(bfqd, bfqq))) {
  3563. if (bfqq->dispatched == 0)
  3564. /*
  3565. * Overloading budget_timeout field to store
  3566. * the time at which the queue remains with no
  3567. * backlog and no outstanding request; used by
  3568. * the weight-raising mechanism.
  3569. */
  3570. bfqq->budget_timeout = jiffies;
  3571. bfq_del_bfqq_busy(bfqq, true);
  3572. } else {
  3573. bfq_requeue_bfqq(bfqd, bfqq, true);
  3574. /*
  3575. * Resort priority tree of potential close cooperators.
  3576. * See comments on bfq_pos_tree_add_move() for the unlikely().
  3577. */
  3578. if (unlikely(!bfqd->nonrot_with_queueing &&
  3579. !RB_EMPTY_ROOT(&bfqq->sort_list)))
  3580. bfq_pos_tree_add_move(bfqd, bfqq);
  3581. }
  3582. /*
  3583. * All in-service entities must have been properly deactivated
  3584. * or requeued before executing the next function, which
  3585. * resets all in-service entities as no more in service. This
  3586. * may cause bfqq to be freed. If this happens, the next
  3587. * function returns true.
  3588. */
  3589. return __bfq_bfqd_reset_in_service(bfqd);
  3590. }
  3591. /**
  3592. * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
  3593. * @bfqd: device data.
  3594. * @bfqq: queue to update.
  3595. * @reason: reason for expiration.
  3596. *
  3597. * Handle the feedback on @bfqq budget at queue expiration.
  3598. * See the body for detailed comments.
  3599. */
  3600. static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
  3601. struct bfq_queue *bfqq,
  3602. enum bfqq_expiration reason)
  3603. {
  3604. struct request *next_rq;
  3605. int budget, min_budget;
  3606. min_budget = bfq_min_budget(bfqd);
  3607. if (bfqq->wr_coeff == 1)
  3608. budget = bfqq->max_budget;
  3609. else /*
  3610. * Use a constant, low budget for weight-raised queues,
  3611. * to help achieve a low latency. Keep it slightly higher
  3612. * than the minimum possible budget, to cause a little
  3613. * bit fewer expirations.
  3614. */
  3615. budget = 2 * min_budget;
  3616. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
  3617. bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
  3618. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
  3619. budget, bfq_min_budget(bfqd));
  3620. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
  3621. bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
  3622. if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
  3623. switch (reason) {
  3624. /*
  3625. * Caveat: in all the following cases we trade latency
  3626. * for throughput.
  3627. */
  3628. case BFQQE_TOO_IDLE:
  3629. /*
  3630. * This is the only case where we may reduce
  3631. * the budget: if there is no request of the
  3632. * process still waiting for completion, then
  3633. * we assume (tentatively) that the timer has
  3634. * expired because the batch of requests of
  3635. * the process could have been served with a
  3636. * smaller budget. Hence, betting that
  3637. * process will behave in the same way when it
  3638. * becomes backlogged again, we reduce its
  3639. * next budget. As long as we guess right,
  3640. * this budget cut reduces the latency
  3641. * experienced by the process.
  3642. *
  3643. * However, if there are still outstanding
  3644. * requests, then the process may have not yet
  3645. * issued its next request just because it is
  3646. * still waiting for the completion of some of
  3647. * the still outstanding ones. So in this
  3648. * subcase we do not reduce its budget, on the
  3649. * contrary we increase it to possibly boost
  3650. * the throughput, as discussed in the
  3651. * comments to the BUDGET_TIMEOUT case.
  3652. */
  3653. if (bfqq->dispatched > 0) /* still outstanding reqs */
  3654. budget = min(budget * 2, bfqd->bfq_max_budget);
  3655. else {
  3656. if (budget > 5 * min_budget)
  3657. budget -= 4 * min_budget;
  3658. else
  3659. budget = min_budget;
  3660. }
  3661. break;
  3662. case BFQQE_BUDGET_TIMEOUT:
  3663. /*
  3664. * We double the budget here because it gives
  3665. * the chance to boost the throughput if this
  3666. * is not a seeky process (and has bumped into
  3667. * this timeout because of, e.g., ZBR).
  3668. */
  3669. budget = min(budget * 2, bfqd->bfq_max_budget);
  3670. break;
  3671. case BFQQE_BUDGET_EXHAUSTED:
  3672. /*
  3673. * The process still has backlog, and did not
  3674. * let either the budget timeout or the disk
  3675. * idling timeout expire. Hence it is not
  3676. * seeky, has a short thinktime and may be
  3677. * happy with a higher budget too. So
  3678. * definitely increase the budget of this good
  3679. * candidate to boost the disk throughput.
  3680. */
  3681. budget = min(budget * 4, bfqd->bfq_max_budget);
  3682. break;
  3683. case BFQQE_NO_MORE_REQUESTS:
  3684. /*
  3685. * For queues that expire for this reason, it
  3686. * is particularly important to keep the
  3687. * budget close to the actual service they
  3688. * need. Doing so reduces the timestamp
  3689. * misalignment problem described in the
  3690. * comments in the body of
  3691. * __bfq_activate_entity. In fact, suppose
  3692. * that a queue systematically expires for
  3693. * BFQQE_NO_MORE_REQUESTS and presents a
  3694. * new request in time to enjoy timestamp
  3695. * back-shifting. The larger the budget of the
  3696. * queue is with respect to the service the
  3697. * queue actually requests in each service
  3698. * slot, the more times the queue can be
  3699. * reactivated with the same virtual finish
  3700. * time. It follows that, even if this finish
  3701. * time is pushed to the system virtual time
  3702. * to reduce the consequent timestamp
  3703. * misalignment, the queue unjustly enjoys for
  3704. * many re-activations a lower finish time
  3705. * than all newly activated queues.
  3706. *
  3707. * The service needed by bfqq is measured
  3708. * quite precisely by bfqq->entity.service.
  3709. * Since bfqq does not enjoy device idling,
  3710. * bfqq->entity.service is equal to the number
  3711. * of sectors that the process associated with
  3712. * bfqq requested to read/write before waiting
  3713. * for request completions, or blocking for
  3714. * other reasons.
  3715. */
  3716. budget = max_t(int, bfqq->entity.service, min_budget);
  3717. break;
  3718. default:
  3719. return;
  3720. }
  3721. } else if (!bfq_bfqq_sync(bfqq)) {
  3722. /*
  3723. * Async queues get always the maximum possible
  3724. * budget, as for them we do not care about latency
  3725. * (in addition, their ability to dispatch is limited
  3726. * by the charging factor).
  3727. */
  3728. budget = bfqd->bfq_max_budget;
  3729. }
  3730. bfqq->max_budget = budget;
  3731. if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
  3732. !bfqd->bfq_user_max_budget)
  3733. bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
  3734. /*
  3735. * If there is still backlog, then assign a new budget, making
  3736. * sure that it is large enough for the next request. Since
  3737. * the finish time of bfqq must be kept in sync with the
  3738. * budget, be sure to call __bfq_bfqq_expire() *after* this
  3739. * update.
  3740. *
  3741. * If there is no backlog, then no need to update the budget;
  3742. * it will be updated on the arrival of a new request.
  3743. */
  3744. next_rq = bfqq->next_rq;
  3745. if (next_rq)
  3746. bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
  3747. bfq_serv_to_charge(next_rq, bfqq));
  3748. bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
  3749. next_rq ? blk_rq_sectors(next_rq) : 0,
  3750. bfqq->entity.budget);
  3751. }
  3752. /*
  3753. * Return true if the process associated with bfqq is "slow". The slow
  3754. * flag is used, in addition to the budget timeout, to reduce the
  3755. * amount of service provided to seeky processes, and thus reduce
  3756. * their chances to lower the throughput. More details in the comments
  3757. * on the function bfq_bfqq_expire().
  3758. *
  3759. * An important observation is in order: as discussed in the comments
  3760. * on the function bfq_update_peak_rate(), with devices with internal
  3761. * queues, it is hard if ever possible to know when and for how long
  3762. * an I/O request is processed by the device (apart from the trivial
  3763. * I/O pattern where a new request is dispatched only after the
  3764. * previous one has been completed). This makes it hard to evaluate
  3765. * the real rate at which the I/O requests of each bfq_queue are
  3766. * served. In fact, for an I/O scheduler like BFQ, serving a
  3767. * bfq_queue means just dispatching its requests during its service
  3768. * slot (i.e., until the budget of the queue is exhausted, or the
  3769. * queue remains idle, or, finally, a timeout fires). But, during the
  3770. * service slot of a bfq_queue, around 100 ms at most, the device may
  3771. * be even still processing requests of bfq_queues served in previous
  3772. * service slots. On the opposite end, the requests of the in-service
  3773. * bfq_queue may be completed after the service slot of the queue
  3774. * finishes.
  3775. *
  3776. * Anyway, unless more sophisticated solutions are used
  3777. * (where possible), the sum of the sizes of the requests dispatched
  3778. * during the service slot of a bfq_queue is probably the only
  3779. * approximation available for the service received by the bfq_queue
  3780. * during its service slot. And this sum is the quantity used in this
  3781. * function to evaluate the I/O speed of a process.
  3782. */
  3783. static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  3784. bool compensate, unsigned long *delta_ms)
  3785. {
  3786. ktime_t delta_ktime;
  3787. u32 delta_usecs;
  3788. bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
  3789. if (!bfq_bfqq_sync(bfqq))
  3790. return false;
  3791. if (compensate)
  3792. delta_ktime = bfqd->last_idling_start;
  3793. else
  3794. delta_ktime = blk_time_get();
  3795. delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
  3796. delta_usecs = ktime_to_us(delta_ktime);
  3797. /* don't use too short time intervals */
  3798. if (delta_usecs < 1000) {
  3799. if (blk_queue_nonrot(bfqd->queue))
  3800. /*
  3801. * give same worst-case guarantees as idling
  3802. * for seeky
  3803. */
  3804. *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
  3805. else /* charge at least one seek */
  3806. *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
  3807. return slow;
  3808. }
  3809. *delta_ms = delta_usecs / USEC_PER_MSEC;
  3810. /*
  3811. * Use only long (> 20ms) intervals to filter out excessive
  3812. * spikes in service rate estimation.
  3813. */
  3814. if (delta_usecs > 20000) {
  3815. /*
  3816. * Caveat for rotational devices: processes doing I/O
  3817. * in the slower disk zones tend to be slow(er) even
  3818. * if not seeky. In this respect, the estimated peak
  3819. * rate is likely to be an average over the disk
  3820. * surface. Accordingly, to not be too harsh with
  3821. * unlucky processes, a process is deemed slow only if
  3822. * its rate has been lower than half of the estimated
  3823. * peak rate.
  3824. */
  3825. slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
  3826. }
  3827. bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
  3828. return slow;
  3829. }
  3830. /*
  3831. * To be deemed as soft real-time, an application must meet two
  3832. * requirements. First, the application must not require an average
  3833. * bandwidth higher than the approximate bandwidth required to playback or
  3834. * record a compressed high-definition video.
  3835. * The next function is invoked on the completion of the last request of a
  3836. * batch, to compute the next-start time instant, soft_rt_next_start, such
  3837. * that, if the next request of the application does not arrive before
  3838. * soft_rt_next_start, then the above requirement on the bandwidth is met.
  3839. *
  3840. * The second requirement is that the request pattern of the application is
  3841. * isochronous, i.e., that, after issuing a request or a batch of requests,
  3842. * the application stops issuing new requests until all its pending requests
  3843. * have been completed. After that, the application may issue a new batch,
  3844. * and so on.
  3845. * For this reason the next function is invoked to compute
  3846. * soft_rt_next_start only for applications that meet this requirement,
  3847. * whereas soft_rt_next_start is set to infinity for applications that do
  3848. * not.
  3849. *
  3850. * Unfortunately, even a greedy (i.e., I/O-bound) application may
  3851. * happen to meet, occasionally or systematically, both the above
  3852. * bandwidth and isochrony requirements. This may happen at least in
  3853. * the following circumstances. First, if the CPU load is high. The
  3854. * application may stop issuing requests while the CPUs are busy
  3855. * serving other processes, then restart, then stop again for a while,
  3856. * and so on. The other circumstances are related to the storage
  3857. * device: the storage device is highly loaded or reaches a low-enough
  3858. * throughput with the I/O of the application (e.g., because the I/O
  3859. * is random and/or the device is slow). In all these cases, the
  3860. * I/O of the application may be simply slowed down enough to meet
  3861. * the bandwidth and isochrony requirements. To reduce the probability
  3862. * that greedy applications are deemed as soft real-time in these
  3863. * corner cases, a further rule is used in the computation of
  3864. * soft_rt_next_start: the return value of this function is forced to
  3865. * be higher than the maximum between the following two quantities.
  3866. *
  3867. * (a) Current time plus: (1) the maximum time for which the arrival
  3868. * of a request is waited for when a sync queue becomes idle,
  3869. * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
  3870. * postpone for a moment the reason for adding a few extra
  3871. * jiffies; we get back to it after next item (b). Lower-bounding
  3872. * the return value of this function with the current time plus
  3873. * bfqd->bfq_slice_idle tends to filter out greedy applications,
  3874. * because the latter issue their next request as soon as possible
  3875. * after the last one has been completed. In contrast, a soft
  3876. * real-time application spends some time processing data, after a
  3877. * batch of its requests has been completed.
  3878. *
  3879. * (b) Current value of bfqq->soft_rt_next_start. As pointed out
  3880. * above, greedy applications may happen to meet both the
  3881. * bandwidth and isochrony requirements under heavy CPU or
  3882. * storage-device load. In more detail, in these scenarios, these
  3883. * applications happen, only for limited time periods, to do I/O
  3884. * slowly enough to meet all the requirements described so far,
  3885. * including the filtering in above item (a). These slow-speed
  3886. * time intervals are usually interspersed between other time
  3887. * intervals during which these applications do I/O at a very high
  3888. * speed. Fortunately, exactly because of the high speed of the
  3889. * I/O in the high-speed intervals, the values returned by this
  3890. * function happen to be so high, near the end of any such
  3891. * high-speed interval, to be likely to fall *after* the end of
  3892. * the low-speed time interval that follows. These high values are
  3893. * stored in bfqq->soft_rt_next_start after each invocation of
  3894. * this function. As a consequence, if the last value of
  3895. * bfqq->soft_rt_next_start is constantly used to lower-bound the
  3896. * next value that this function may return, then, from the very
  3897. * beginning of a low-speed interval, bfqq->soft_rt_next_start is
  3898. * likely to be constantly kept so high that any I/O request
  3899. * issued during the low-speed interval is considered as arriving
  3900. * to soon for the application to be deemed as soft
  3901. * real-time. Then, in the high-speed interval that follows, the
  3902. * application will not be deemed as soft real-time, just because
  3903. * it will do I/O at a high speed. And so on.
  3904. *
  3905. * Getting back to the filtering in item (a), in the following two
  3906. * cases this filtering might be easily passed by a greedy
  3907. * application, if the reference quantity was just
  3908. * bfqd->bfq_slice_idle:
  3909. * 1) HZ is so low that the duration of a jiffy is comparable to or
  3910. * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
  3911. * devices with HZ=100. The time granularity may be so coarse
  3912. * that the approximation, in jiffies, of bfqd->bfq_slice_idle
  3913. * is rather lower than the exact value.
  3914. * 2) jiffies, instead of increasing at a constant rate, may stop increasing
  3915. * for a while, then suddenly 'jump' by several units to recover the lost
  3916. * increments. This seems to happen, e.g., inside virtual machines.
  3917. * To address this issue, in the filtering in (a) we do not use as a
  3918. * reference time interval just bfqd->bfq_slice_idle, but
  3919. * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
  3920. * minimum number of jiffies for which the filter seems to be quite
  3921. * precise also in embedded systems and KVM/QEMU virtual machines.
  3922. */
  3923. static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
  3924. struct bfq_queue *bfqq)
  3925. {
  3926. return max3(bfqq->soft_rt_next_start,
  3927. bfqq->last_idle_bklogged +
  3928. HZ * bfqq->service_from_backlogged /
  3929. bfqd->bfq_wr_max_softrt_rate,
  3930. jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
  3931. }
  3932. /**
  3933. * bfq_bfqq_expire - expire a queue.
  3934. * @bfqd: device owning the queue.
  3935. * @bfqq: the queue to expire.
  3936. * @compensate: if true, compensate for the time spent idling.
  3937. * @reason: the reason causing the expiration.
  3938. *
  3939. * If the process associated with bfqq does slow I/O (e.g., because it
  3940. * issues random requests), we charge bfqq with the time it has been
  3941. * in service instead of the service it has received (see
  3942. * bfq_bfqq_charge_time for details on how this goal is achieved). As
  3943. * a consequence, bfqq will typically get higher timestamps upon
  3944. * reactivation, and hence it will be rescheduled as if it had
  3945. * received more service than what it has actually received. In the
  3946. * end, bfqq receives less service in proportion to how slowly its
  3947. * associated process consumes its budgets (and hence how seriously it
  3948. * tends to lower the throughput). In addition, this time-charging
  3949. * strategy guarantees time fairness among slow processes. In
  3950. * contrast, if the process associated with bfqq is not slow, we
  3951. * charge bfqq exactly with the service it has received.
  3952. *
  3953. * Charging time to the first type of queues and the exact service to
  3954. * the other has the effect of using the WF2Q+ policy to schedule the
  3955. * former on a timeslice basis, without violating service domain
  3956. * guarantees among the latter.
  3957. */
  3958. void bfq_bfqq_expire(struct bfq_data *bfqd,
  3959. struct bfq_queue *bfqq,
  3960. bool compensate,
  3961. enum bfqq_expiration reason)
  3962. {
  3963. bool slow;
  3964. unsigned long delta = 0;
  3965. struct bfq_entity *entity = &bfqq->entity;
  3966. /*
  3967. * Check whether the process is slow (see bfq_bfqq_is_slow).
  3968. */
  3969. slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, &delta);
  3970. /*
  3971. * As above explained, charge slow (typically seeky) and
  3972. * timed-out queues with the time and not the service
  3973. * received, to favor sequential workloads.
  3974. *
  3975. * Processes doing I/O in the slower disk zones will tend to
  3976. * be slow(er) even if not seeky. Therefore, since the
  3977. * estimated peak rate is actually an average over the disk
  3978. * surface, these processes may timeout just for bad luck. To
  3979. * avoid punishing them, do not charge time to processes that
  3980. * succeeded in consuming at least 2/3 of their budget. This
  3981. * allows BFQ to preserve enough elasticity to still perform
  3982. * bandwidth, and not time, distribution with little unlucky
  3983. * or quasi-sequential processes.
  3984. */
  3985. if (bfqq->wr_coeff == 1 &&
  3986. (slow ||
  3987. (reason == BFQQE_BUDGET_TIMEOUT &&
  3988. bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
  3989. bfq_bfqq_charge_time(bfqd, bfqq, delta);
  3990. if (bfqd->low_latency && bfqq->wr_coeff == 1)
  3991. bfqq->last_wr_start_finish = jiffies;
  3992. if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
  3993. RB_EMPTY_ROOT(&bfqq->sort_list)) {
  3994. /*
  3995. * If we get here, and there are no outstanding
  3996. * requests, then the request pattern is isochronous
  3997. * (see the comments on the function
  3998. * bfq_bfqq_softrt_next_start()). Therefore we can
  3999. * compute soft_rt_next_start.
  4000. *
  4001. * If, instead, the queue still has outstanding
  4002. * requests, then we have to wait for the completion
  4003. * of all the outstanding requests to discover whether
  4004. * the request pattern is actually isochronous.
  4005. */
  4006. if (bfqq->dispatched == 0)
  4007. bfqq->soft_rt_next_start =
  4008. bfq_bfqq_softrt_next_start(bfqd, bfqq);
  4009. else if (bfqq->dispatched > 0) {
  4010. /*
  4011. * Schedule an update of soft_rt_next_start to when
  4012. * the task may be discovered to be isochronous.
  4013. */
  4014. bfq_mark_bfqq_softrt_update(bfqq);
  4015. }
  4016. }
  4017. bfq_log_bfqq(bfqd, bfqq,
  4018. "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
  4019. slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
  4020. /*
  4021. * bfqq expired, so no total service time needs to be computed
  4022. * any longer: reset state machine for measuring total service
  4023. * times.
  4024. */
  4025. bfqd->rqs_injected = bfqd->wait_dispatch = false;
  4026. bfqd->waited_rq = NULL;
  4027. /*
  4028. * Increase, decrease or leave budget unchanged according to
  4029. * reason.
  4030. */
  4031. __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
  4032. if (__bfq_bfqq_expire(bfqd, bfqq, reason))
  4033. /* bfqq is gone, no more actions on it */
  4034. return;
  4035. /* mark bfqq as waiting a request only if a bic still points to it */
  4036. if (!bfq_bfqq_busy(bfqq) &&
  4037. reason != BFQQE_BUDGET_TIMEOUT &&
  4038. reason != BFQQE_BUDGET_EXHAUSTED) {
  4039. bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
  4040. /*
  4041. * Not setting service to 0, because, if the next rq
  4042. * arrives in time, the queue will go on receiving
  4043. * service with this same budget (as if it never expired)
  4044. */
  4045. } else
  4046. entity->service = 0;
  4047. /*
  4048. * Reset the received-service counter for every parent entity.
  4049. * Differently from what happens with bfqq->entity.service,
  4050. * the resetting of this counter never needs to be postponed
  4051. * for parent entities. In fact, in case bfqq may have a
  4052. * chance to go on being served using the last, partially
  4053. * consumed budget, bfqq->entity.service needs to be kept,
  4054. * because if bfqq then actually goes on being served using
  4055. * the same budget, the last value of bfqq->entity.service is
  4056. * needed to properly decrement bfqq->entity.budget by the
  4057. * portion already consumed. In contrast, it is not necessary
  4058. * to keep entity->service for parent entities too, because
  4059. * the bubble up of the new value of bfqq->entity.budget will
  4060. * make sure that the budgets of parent entities are correct,
  4061. * even in case bfqq and thus parent entities go on receiving
  4062. * service with the same budget.
  4063. */
  4064. entity = entity->parent;
  4065. for_each_entity(entity)
  4066. entity->service = 0;
  4067. }
  4068. /*
  4069. * Budget timeout is not implemented through a dedicated timer, but
  4070. * just checked on request arrivals and completions, as well as on
  4071. * idle timer expirations.
  4072. */
  4073. static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
  4074. {
  4075. return time_is_before_eq_jiffies(bfqq->budget_timeout);
  4076. }
  4077. /*
  4078. * If we expire a queue that is actively waiting (i.e., with the
  4079. * device idled) for the arrival of a new request, then we may incur
  4080. * the timestamp misalignment problem described in the body of the
  4081. * function __bfq_activate_entity. Hence we return true only if this
  4082. * condition does not hold, or if the queue is slow enough to deserve
  4083. * only to be kicked off for preserving a high throughput.
  4084. */
  4085. static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
  4086. {
  4087. bfq_log_bfqq(bfqq->bfqd, bfqq,
  4088. "may_budget_timeout: wait_request %d left %d timeout %d",
  4089. bfq_bfqq_wait_request(bfqq),
  4090. bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
  4091. bfq_bfqq_budget_timeout(bfqq));
  4092. return (!bfq_bfqq_wait_request(bfqq) ||
  4093. bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
  4094. &&
  4095. bfq_bfqq_budget_timeout(bfqq);
  4096. }
  4097. static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
  4098. struct bfq_queue *bfqq)
  4099. {
  4100. bool rot_without_queueing =
  4101. !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
  4102. bfqq_sequential_and_IO_bound,
  4103. idling_boosts_thr;
  4104. /* No point in idling for bfqq if it won't get requests any longer */
  4105. if (unlikely(!bfqq_process_refs(bfqq)))
  4106. return false;
  4107. bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
  4108. bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
  4109. /*
  4110. * The next variable takes into account the cases where idling
  4111. * boosts the throughput.
  4112. *
  4113. * The value of the variable is computed considering, first, that
  4114. * idling is virtually always beneficial for the throughput if:
  4115. * (a) the device is not NCQ-capable and rotational, or
  4116. * (b) regardless of the presence of NCQ, the device is rotational and
  4117. * the request pattern for bfqq is I/O-bound and sequential, or
  4118. * (c) regardless of whether it is rotational, the device is
  4119. * not NCQ-capable and the request pattern for bfqq is
  4120. * I/O-bound and sequential.
  4121. *
  4122. * Secondly, and in contrast to the above item (b), idling an
  4123. * NCQ-capable flash-based device would not boost the
  4124. * throughput even with sequential I/O; rather it would lower
  4125. * the throughput in proportion to how fast the device
  4126. * is. Accordingly, the next variable is true if any of the
  4127. * above conditions (a), (b) or (c) is true, and, in
  4128. * particular, happens to be false if bfqd is an NCQ-capable
  4129. * flash-based device.
  4130. */
  4131. idling_boosts_thr = rot_without_queueing ||
  4132. ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
  4133. bfqq_sequential_and_IO_bound);
  4134. /*
  4135. * The return value of this function is equal to that of
  4136. * idling_boosts_thr, unless a special case holds. In this
  4137. * special case, described below, idling may cause problems to
  4138. * weight-raised queues.
  4139. *
  4140. * When the request pool is saturated (e.g., in the presence
  4141. * of write hogs), if the processes associated with
  4142. * non-weight-raised queues ask for requests at a lower rate,
  4143. * then processes associated with weight-raised queues have a
  4144. * higher probability to get a request from the pool
  4145. * immediately (or at least soon) when they need one. Thus
  4146. * they have a higher probability to actually get a fraction
  4147. * of the device throughput proportional to their high
  4148. * weight. This is especially true with NCQ-capable drives,
  4149. * which enqueue several requests in advance, and further
  4150. * reorder internally-queued requests.
  4151. *
  4152. * For this reason, we force to false the return value if
  4153. * there are weight-raised busy queues. In this case, and if
  4154. * bfqq is not weight-raised, this guarantees that the device
  4155. * is not idled for bfqq (if, instead, bfqq is weight-raised,
  4156. * then idling will be guaranteed by another variable, see
  4157. * below). Combined with the timestamping rules of BFQ (see
  4158. * [1] for details), this behavior causes bfqq, and hence any
  4159. * sync non-weight-raised queue, to get a lower number of
  4160. * requests served, and thus to ask for a lower number of
  4161. * requests from the request pool, before the busy
  4162. * weight-raised queues get served again. This often mitigates
  4163. * starvation problems in the presence of heavy write
  4164. * workloads and NCQ, thereby guaranteeing a higher
  4165. * application and system responsiveness in these hostile
  4166. * scenarios.
  4167. */
  4168. return idling_boosts_thr &&
  4169. bfqd->wr_busy_queues == 0;
  4170. }
  4171. /*
  4172. * For a queue that becomes empty, device idling is allowed only if
  4173. * this function returns true for that queue. As a consequence, since
  4174. * device idling plays a critical role for both throughput boosting
  4175. * and service guarantees, the return value of this function plays a
  4176. * critical role as well.
  4177. *
  4178. * In a nutshell, this function returns true only if idling is
  4179. * beneficial for throughput or, even if detrimental for throughput,
  4180. * idling is however necessary to preserve service guarantees (low
  4181. * latency, desired throughput distribution, ...). In particular, on
  4182. * NCQ-capable devices, this function tries to return false, so as to
  4183. * help keep the drives' internal queues full, whenever this helps the
  4184. * device boost the throughput without causing any service-guarantee
  4185. * issue.
  4186. *
  4187. * Most of the issues taken into account to get the return value of
  4188. * this function are not trivial. We discuss these issues in the two
  4189. * functions providing the main pieces of information needed by this
  4190. * function.
  4191. */
  4192. static bool bfq_better_to_idle(struct bfq_queue *bfqq)
  4193. {
  4194. struct bfq_data *bfqd = bfqq->bfqd;
  4195. bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
  4196. /* No point in idling for bfqq if it won't get requests any longer */
  4197. if (unlikely(!bfqq_process_refs(bfqq)))
  4198. return false;
  4199. if (unlikely(bfqd->strict_guarantees))
  4200. return true;
  4201. /*
  4202. * Idling is performed only if slice_idle > 0. In addition, we
  4203. * do not idle if
  4204. * (a) bfqq is async
  4205. * (b) bfqq is in the idle io prio class: in this case we do
  4206. * not idle because we want to minimize the bandwidth that
  4207. * queues in this class can steal to higher-priority queues
  4208. */
  4209. if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
  4210. bfq_class_idle(bfqq))
  4211. return false;
  4212. idling_boosts_thr_with_no_issue =
  4213. idling_boosts_thr_without_issues(bfqd, bfqq);
  4214. idling_needed_for_service_guar =
  4215. idling_needed_for_service_guarantees(bfqd, bfqq);
  4216. /*
  4217. * We have now the two components we need to compute the
  4218. * return value of the function, which is true only if idling
  4219. * either boosts the throughput (without issues), or is
  4220. * necessary to preserve service guarantees.
  4221. */
  4222. return idling_boosts_thr_with_no_issue ||
  4223. idling_needed_for_service_guar;
  4224. }
  4225. /*
  4226. * If the in-service queue is empty but the function bfq_better_to_idle
  4227. * returns true, then:
  4228. * 1) the queue must remain in service and cannot be expired, and
  4229. * 2) the device must be idled to wait for the possible arrival of a new
  4230. * request for the queue.
  4231. * See the comments on the function bfq_better_to_idle for the reasons
  4232. * why performing device idling is the best choice to boost the throughput
  4233. * and preserve service guarantees when bfq_better_to_idle itself
  4234. * returns true.
  4235. */
  4236. static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
  4237. {
  4238. return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
  4239. }
  4240. /*
  4241. * This function chooses the queue from which to pick the next extra
  4242. * I/O request to inject, if it finds a compatible queue. See the
  4243. * comments on bfq_update_inject_limit() for details on the injection
  4244. * mechanism, and for the definitions of the quantities mentioned
  4245. * below.
  4246. */
  4247. static struct bfq_queue *
  4248. bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
  4249. {
  4250. struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
  4251. unsigned int limit = in_serv_bfqq->inject_limit;
  4252. int i;
  4253. /*
  4254. * If
  4255. * - bfqq is not weight-raised and therefore does not carry
  4256. * time-critical I/O,
  4257. * or
  4258. * - regardless of whether bfqq is weight-raised, bfqq has
  4259. * however a long think time, during which it can absorb the
  4260. * effect of an appropriate number of extra I/O requests
  4261. * from other queues (see bfq_update_inject_limit for
  4262. * details on the computation of this number);
  4263. * then injection can be performed without restrictions.
  4264. */
  4265. bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
  4266. !bfq_bfqq_has_short_ttime(in_serv_bfqq);
  4267. /*
  4268. * If
  4269. * - the baseline total service time could not be sampled yet,
  4270. * so the inject limit happens to be still 0, and
  4271. * - a lot of time has elapsed since the plugging of I/O
  4272. * dispatching started, so drive speed is being wasted
  4273. * significantly;
  4274. * then temporarily raise inject limit to one request.
  4275. */
  4276. if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
  4277. bfq_bfqq_wait_request(in_serv_bfqq) &&
  4278. time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
  4279. bfqd->bfq_slice_idle)
  4280. )
  4281. limit = 1;
  4282. if (bfqd->tot_rq_in_driver >= limit)
  4283. return NULL;
  4284. /*
  4285. * Linear search of the source queue for injection; but, with
  4286. * a high probability, very few steps are needed to find a
  4287. * candidate queue, i.e., a queue with enough budget left for
  4288. * its next request. In fact:
  4289. * - BFQ dynamically updates the budget of every queue so as
  4290. * to accommodate the expected backlog of the queue;
  4291. * - if a queue gets all its requests dispatched as injected
  4292. * service, then the queue is removed from the active list
  4293. * (and re-added only if it gets new requests, but then it
  4294. * is assigned again enough budget for its new backlog).
  4295. */
  4296. for (i = 0; i < bfqd->num_actuators; i++) {
  4297. list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
  4298. if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
  4299. (in_serv_always_inject || bfqq->wr_coeff > 1) &&
  4300. bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
  4301. bfq_bfqq_budget_left(bfqq)) {
  4302. /*
  4303. * Allow for only one large in-flight request
  4304. * on non-rotational devices, for the
  4305. * following reason. On non-rotationl drives,
  4306. * large requests take much longer than
  4307. * smaller requests to be served. In addition,
  4308. * the drive prefers to serve large requests
  4309. * w.r.t. to small ones, if it can choose. So,
  4310. * having more than one large requests queued
  4311. * in the drive may easily make the next first
  4312. * request of the in-service queue wait for so
  4313. * long to break bfqq's service guarantees. On
  4314. * the bright side, large requests let the
  4315. * drive reach a very high throughput, even if
  4316. * there is only one in-flight large request
  4317. * at a time.
  4318. */
  4319. if (blk_queue_nonrot(bfqd->queue) &&
  4320. blk_rq_sectors(bfqq->next_rq) >=
  4321. BFQQ_SECT_THR_NONROT &&
  4322. bfqd->tot_rq_in_driver >= 1)
  4323. continue;
  4324. else {
  4325. bfqd->rqs_injected = true;
  4326. return bfqq;
  4327. }
  4328. }
  4329. }
  4330. return NULL;
  4331. }
  4332. static struct bfq_queue *
  4333. bfq_find_active_bfqq_for_actuator(struct bfq_data *bfqd, int idx)
  4334. {
  4335. struct bfq_queue *bfqq;
  4336. if (bfqd->in_service_queue &&
  4337. bfqd->in_service_queue->actuator_idx == idx)
  4338. return bfqd->in_service_queue;
  4339. list_for_each_entry(bfqq, &bfqd->active_list[idx], bfqq_list) {
  4340. if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
  4341. bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
  4342. bfq_bfqq_budget_left(bfqq)) {
  4343. return bfqq;
  4344. }
  4345. }
  4346. return NULL;
  4347. }
  4348. /*
  4349. * Perform a linear scan of each actuator, until an actuator is found
  4350. * for which the following three conditions hold: the load of the
  4351. * actuator is below the threshold (see comments on
  4352. * actuator_load_threshold for details) and lower than that of the
  4353. * next actuator (comments on this extra condition below), and there
  4354. * is a queue that contains I/O for that actuator. On success, return
  4355. * that queue.
  4356. *
  4357. * Performing a plain linear scan entails a prioritization among
  4358. * actuators. The extra condition above breaks this prioritization and
  4359. * tends to distribute injection uniformly across actuators.
  4360. */
  4361. static struct bfq_queue *
  4362. bfq_find_bfqq_for_underused_actuator(struct bfq_data *bfqd)
  4363. {
  4364. int i;
  4365. for (i = 0 ; i < bfqd->num_actuators; i++) {
  4366. if (bfqd->rq_in_driver[i] < bfqd->actuator_load_threshold &&
  4367. (i == bfqd->num_actuators - 1 ||
  4368. bfqd->rq_in_driver[i] < bfqd->rq_in_driver[i+1])) {
  4369. struct bfq_queue *bfqq =
  4370. bfq_find_active_bfqq_for_actuator(bfqd, i);
  4371. if (bfqq)
  4372. return bfqq;
  4373. }
  4374. }
  4375. return NULL;
  4376. }
  4377. /*
  4378. * Select a queue for service. If we have a current queue in service,
  4379. * check whether to continue servicing it, or retrieve and set a new one.
  4380. */
  4381. static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
  4382. {
  4383. struct bfq_queue *bfqq, *inject_bfqq;
  4384. struct request *next_rq;
  4385. enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
  4386. bfqq = bfqd->in_service_queue;
  4387. if (!bfqq)
  4388. goto new_queue;
  4389. bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
  4390. /*
  4391. * Do not expire bfqq for budget timeout if bfqq may be about
  4392. * to enjoy device idling. The reason why, in this case, we
  4393. * prevent bfqq from expiring is the same as in the comments
  4394. * on the case where bfq_bfqq_must_idle() returns true, in
  4395. * bfq_completed_request().
  4396. */
  4397. if (bfq_may_expire_for_budg_timeout(bfqq) &&
  4398. !bfq_bfqq_must_idle(bfqq))
  4399. goto expire;
  4400. check_queue:
  4401. /*
  4402. * If some actuator is underutilized, but the in-service
  4403. * queue does not contain I/O for that actuator, then try to
  4404. * inject I/O for that actuator.
  4405. */
  4406. inject_bfqq = bfq_find_bfqq_for_underused_actuator(bfqd);
  4407. if (inject_bfqq && inject_bfqq != bfqq)
  4408. return inject_bfqq;
  4409. /*
  4410. * This loop is rarely executed more than once. Even when it
  4411. * happens, it is much more convenient to re-execute this loop
  4412. * than to return NULL and trigger a new dispatch to get a
  4413. * request served.
  4414. */
  4415. next_rq = bfqq->next_rq;
  4416. /*
  4417. * If bfqq has requests queued and it has enough budget left to
  4418. * serve them, keep the queue, otherwise expire it.
  4419. */
  4420. if (next_rq) {
  4421. if (bfq_serv_to_charge(next_rq, bfqq) >
  4422. bfq_bfqq_budget_left(bfqq)) {
  4423. /*
  4424. * Expire the queue for budget exhaustion,
  4425. * which makes sure that the next budget is
  4426. * enough to serve the next request, even if
  4427. * it comes from the fifo expired path.
  4428. */
  4429. reason = BFQQE_BUDGET_EXHAUSTED;
  4430. goto expire;
  4431. } else {
  4432. /*
  4433. * The idle timer may be pending because we may
  4434. * not disable disk idling even when a new request
  4435. * arrives.
  4436. */
  4437. if (bfq_bfqq_wait_request(bfqq)) {
  4438. /*
  4439. * If we get here: 1) at least a new request
  4440. * has arrived but we have not disabled the
  4441. * timer because the request was too small,
  4442. * 2) then the block layer has unplugged
  4443. * the device, causing the dispatch to be
  4444. * invoked.
  4445. *
  4446. * Since the device is unplugged, now the
  4447. * requests are probably large enough to
  4448. * provide a reasonable throughput.
  4449. * So we disable idling.
  4450. */
  4451. bfq_clear_bfqq_wait_request(bfqq);
  4452. hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
  4453. }
  4454. goto keep_queue;
  4455. }
  4456. }
  4457. /*
  4458. * No requests pending. However, if the in-service queue is idling
  4459. * for a new request, or has requests waiting for a completion and
  4460. * may idle after their completion, then keep it anyway.
  4461. *
  4462. * Yet, inject service from other queues if it boosts
  4463. * throughput and is possible.
  4464. */
  4465. if (bfq_bfqq_wait_request(bfqq) ||
  4466. (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
  4467. unsigned int act_idx = bfqq->actuator_idx;
  4468. struct bfq_queue *async_bfqq = NULL;
  4469. struct bfq_queue *blocked_bfqq =
  4470. !hlist_empty(&bfqq->woken_list) ?
  4471. container_of(bfqq->woken_list.first,
  4472. struct bfq_queue,
  4473. woken_list_node)
  4474. : NULL;
  4475. if (bfqq->bic && bfqq->bic->bfqq[0][act_idx] &&
  4476. bfq_bfqq_busy(bfqq->bic->bfqq[0][act_idx]) &&
  4477. bfqq->bic->bfqq[0][act_idx]->next_rq)
  4478. async_bfqq = bfqq->bic->bfqq[0][act_idx];
  4479. /*
  4480. * The next four mutually-exclusive ifs decide
  4481. * whether to try injection, and choose the queue to
  4482. * pick an I/O request from.
  4483. *
  4484. * The first if checks whether the process associated
  4485. * with bfqq has also async I/O pending. If so, it
  4486. * injects such I/O unconditionally. Injecting async
  4487. * I/O from the same process can cause no harm to the
  4488. * process. On the contrary, it can only increase
  4489. * bandwidth and reduce latency for the process.
  4490. *
  4491. * The second if checks whether there happens to be a
  4492. * non-empty waker queue for bfqq, i.e., a queue whose
  4493. * I/O needs to be completed for bfqq to receive new
  4494. * I/O. This happens, e.g., if bfqq is associated with
  4495. * a process that does some sync. A sync generates
  4496. * extra blocking I/O, which must be completed before
  4497. * the process associated with bfqq can go on with its
  4498. * I/O. If the I/O of the waker queue is not served,
  4499. * then bfqq remains empty, and no I/O is dispatched,
  4500. * until the idle timeout fires for bfqq. This is
  4501. * likely to result in lower bandwidth and higher
  4502. * latencies for bfqq, and in a severe loss of total
  4503. * throughput. The best action to take is therefore to
  4504. * serve the waker queue as soon as possible. So do it
  4505. * (without relying on the third alternative below for
  4506. * eventually serving waker_bfqq's I/O; see the last
  4507. * paragraph for further details). This systematic
  4508. * injection of I/O from the waker queue does not
  4509. * cause any delay to bfqq's I/O. On the contrary,
  4510. * next bfqq's I/O is brought forward dramatically,
  4511. * for it is not blocked for milliseconds.
  4512. *
  4513. * The third if checks whether there is a queue woken
  4514. * by bfqq, and currently with pending I/O. Such a
  4515. * woken queue does not steal bandwidth from bfqq,
  4516. * because it remains soon without I/O if bfqq is not
  4517. * served. So there is virtually no risk of loss of
  4518. * bandwidth for bfqq if this woken queue has I/O
  4519. * dispatched while bfqq is waiting for new I/O.
  4520. *
  4521. * The fourth if checks whether bfqq is a queue for
  4522. * which it is better to avoid injection. It is so if
  4523. * bfqq delivers more throughput when served without
  4524. * any further I/O from other queues in the middle, or
  4525. * if the service times of bfqq's I/O requests both
  4526. * count more than overall throughput, and may be
  4527. * easily increased by injection (this happens if bfqq
  4528. * has a short think time). If none of these
  4529. * conditions holds, then a candidate queue for
  4530. * injection is looked for through
  4531. * bfq_choose_bfqq_for_injection(). Note that the
  4532. * latter may return NULL (for example if the inject
  4533. * limit for bfqq is currently 0).
  4534. *
  4535. * NOTE: motivation for the second alternative
  4536. *
  4537. * Thanks to the way the inject limit is updated in
  4538. * bfq_update_has_short_ttime(), it is rather likely
  4539. * that, if I/O is being plugged for bfqq and the
  4540. * waker queue has pending I/O requests that are
  4541. * blocking bfqq's I/O, then the fourth alternative
  4542. * above lets the waker queue get served before the
  4543. * I/O-plugging timeout fires. So one may deem the
  4544. * second alternative superfluous. It is not, because
  4545. * the fourth alternative may be way less effective in
  4546. * case of a synchronization. For two main
  4547. * reasons. First, throughput may be low because the
  4548. * inject limit may be too low to guarantee the same
  4549. * amount of injected I/O, from the waker queue or
  4550. * other queues, that the second alternative
  4551. * guarantees (the second alternative unconditionally
  4552. * injects a pending I/O request of the waker queue
  4553. * for each bfq_dispatch_request()). Second, with the
  4554. * fourth alternative, the duration of the plugging,
  4555. * i.e., the time before bfqq finally receives new I/O,
  4556. * may not be minimized, because the waker queue may
  4557. * happen to be served only after other queues.
  4558. */
  4559. if (async_bfqq &&
  4560. icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
  4561. bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
  4562. bfq_bfqq_budget_left(async_bfqq))
  4563. bfqq = async_bfqq;
  4564. else if (bfqq->waker_bfqq &&
  4565. bfq_bfqq_busy(bfqq->waker_bfqq) &&
  4566. bfqq->waker_bfqq->next_rq &&
  4567. bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
  4568. bfqq->waker_bfqq) <=
  4569. bfq_bfqq_budget_left(bfqq->waker_bfqq)
  4570. )
  4571. bfqq = bfqq->waker_bfqq;
  4572. else if (blocked_bfqq &&
  4573. bfq_bfqq_busy(blocked_bfqq) &&
  4574. blocked_bfqq->next_rq &&
  4575. bfq_serv_to_charge(blocked_bfqq->next_rq,
  4576. blocked_bfqq) <=
  4577. bfq_bfqq_budget_left(blocked_bfqq)
  4578. )
  4579. bfqq = blocked_bfqq;
  4580. else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
  4581. (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
  4582. !bfq_bfqq_has_short_ttime(bfqq)))
  4583. bfqq = bfq_choose_bfqq_for_injection(bfqd);
  4584. else
  4585. bfqq = NULL;
  4586. goto keep_queue;
  4587. }
  4588. reason = BFQQE_NO_MORE_REQUESTS;
  4589. expire:
  4590. bfq_bfqq_expire(bfqd, bfqq, false, reason);
  4591. new_queue:
  4592. bfqq = bfq_set_in_service_queue(bfqd);
  4593. if (bfqq) {
  4594. bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
  4595. goto check_queue;
  4596. }
  4597. keep_queue:
  4598. if (bfqq)
  4599. bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
  4600. else
  4601. bfq_log(bfqd, "select_queue: no queue returned");
  4602. return bfqq;
  4603. }
  4604. static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  4605. {
  4606. struct bfq_entity *entity = &bfqq->entity;
  4607. if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
  4608. bfq_log_bfqq(bfqd, bfqq,
  4609. "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
  4610. jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
  4611. jiffies_to_msecs(bfqq->wr_cur_max_time),
  4612. bfqq->wr_coeff,
  4613. bfqq->entity.weight, bfqq->entity.orig_weight);
  4614. if (entity->prio_changed)
  4615. bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
  4616. /*
  4617. * If the queue was activated in a burst, or too much
  4618. * time has elapsed from the beginning of this
  4619. * weight-raising period, then end weight raising.
  4620. */
  4621. if (bfq_bfqq_in_large_burst(bfqq))
  4622. bfq_bfqq_end_wr(bfqq);
  4623. else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
  4624. bfqq->wr_cur_max_time)) {
  4625. if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
  4626. time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
  4627. bfq_wr_duration(bfqd))) {
  4628. /*
  4629. * Either in interactive weight
  4630. * raising, or in soft_rt weight
  4631. * raising with the
  4632. * interactive-weight-raising period
  4633. * elapsed (so no switch back to
  4634. * interactive weight raising).
  4635. */
  4636. bfq_bfqq_end_wr(bfqq);
  4637. } else { /*
  4638. * soft_rt finishing while still in
  4639. * interactive period, switch back to
  4640. * interactive weight raising
  4641. */
  4642. switch_back_to_interactive_wr(bfqq, bfqd);
  4643. bfqq->entity.prio_changed = 1;
  4644. }
  4645. }
  4646. if (bfqq->wr_coeff > 1 &&
  4647. bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
  4648. bfqq->service_from_wr > max_service_from_wr) {
  4649. /* see comments on max_service_from_wr */
  4650. bfq_bfqq_end_wr(bfqq);
  4651. }
  4652. }
  4653. /*
  4654. * To improve latency (for this or other queues), immediately
  4655. * update weight both if it must be raised and if it must be
  4656. * lowered. Since, entity may be on some active tree here, and
  4657. * might have a pending change of its ioprio class, invoke
  4658. * next function with the last parameter unset (see the
  4659. * comments on the function).
  4660. */
  4661. if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
  4662. __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
  4663. entity, false);
  4664. }
  4665. /*
  4666. * Dispatch next request from bfqq.
  4667. */
  4668. static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
  4669. struct bfq_queue *bfqq)
  4670. {
  4671. struct request *rq = bfqq->next_rq;
  4672. unsigned long service_to_charge;
  4673. service_to_charge = bfq_serv_to_charge(rq, bfqq);
  4674. bfq_bfqq_served(bfqq, service_to_charge);
  4675. if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
  4676. bfqd->wait_dispatch = false;
  4677. bfqd->waited_rq = rq;
  4678. }
  4679. bfq_dispatch_remove(bfqd->queue, rq);
  4680. if (bfqq != bfqd->in_service_queue)
  4681. return rq;
  4682. /*
  4683. * If weight raising has to terminate for bfqq, then next
  4684. * function causes an immediate update of bfqq's weight,
  4685. * without waiting for next activation. As a consequence, on
  4686. * expiration, bfqq will be timestamped as if has never been
  4687. * weight-raised during this service slot, even if it has
  4688. * received part or even most of the service as a
  4689. * weight-raised queue. This inflates bfqq's timestamps, which
  4690. * is beneficial, as bfqq is then more willing to leave the
  4691. * device immediately to possible other weight-raised queues.
  4692. */
  4693. bfq_update_wr_data(bfqd, bfqq);
  4694. /*
  4695. * Expire bfqq, pretending that its budget expired, if bfqq
  4696. * belongs to CLASS_IDLE and other queues are waiting for
  4697. * service.
  4698. */
  4699. if (bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq))
  4700. bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
  4701. return rq;
  4702. }
  4703. static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
  4704. {
  4705. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4706. /*
  4707. * Avoiding lock: a race on bfqd->queued should cause at
  4708. * most a call to dispatch for nothing
  4709. */
  4710. return !list_empty_careful(&bfqd->dispatch) ||
  4711. READ_ONCE(bfqd->queued);
  4712. }
  4713. static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
  4714. {
  4715. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4716. struct request *rq = NULL;
  4717. struct bfq_queue *bfqq = NULL;
  4718. if (!list_empty(&bfqd->dispatch)) {
  4719. rq = list_first_entry(&bfqd->dispatch, struct request,
  4720. queuelist);
  4721. list_del_init(&rq->queuelist);
  4722. bfqq = RQ_BFQQ(rq);
  4723. if (bfqq) {
  4724. /*
  4725. * Increment counters here, because this
  4726. * dispatch does not follow the standard
  4727. * dispatch flow (where counters are
  4728. * incremented)
  4729. */
  4730. bfqq->dispatched++;
  4731. goto inc_in_driver_start_rq;
  4732. }
  4733. /*
  4734. * We exploit the bfq_finish_requeue_request hook to
  4735. * decrement tot_rq_in_driver, but
  4736. * bfq_finish_requeue_request will not be invoked on
  4737. * this request. So, to avoid unbalance, just start
  4738. * this request, without incrementing tot_rq_in_driver. As
  4739. * a negative consequence, tot_rq_in_driver is deceptively
  4740. * lower than it should be while this request is in
  4741. * service. This may cause bfq_schedule_dispatch to be
  4742. * invoked uselessly.
  4743. *
  4744. * As for implementing an exact solution, the
  4745. * bfq_finish_requeue_request hook, if defined, is
  4746. * probably invoked also on this request. So, by
  4747. * exploiting this hook, we could 1) increment
  4748. * tot_rq_in_driver here, and 2) decrement it in
  4749. * bfq_finish_requeue_request. Such a solution would
  4750. * let the value of the counter be always accurate,
  4751. * but it would entail using an extra interface
  4752. * function. This cost seems higher than the benefit,
  4753. * being the frequency of non-elevator-private
  4754. * requests very low.
  4755. */
  4756. goto start_rq;
  4757. }
  4758. bfq_log(bfqd, "dispatch requests: %d busy queues",
  4759. bfq_tot_busy_queues(bfqd));
  4760. if (bfq_tot_busy_queues(bfqd) == 0)
  4761. goto exit;
  4762. /*
  4763. * Force device to serve one request at a time if
  4764. * strict_guarantees is true. Forcing this service scheme is
  4765. * currently the ONLY way to guarantee that the request
  4766. * service order enforced by the scheduler is respected by a
  4767. * queueing device. Otherwise the device is free even to make
  4768. * some unlucky request wait for as long as the device
  4769. * wishes.
  4770. *
  4771. * Of course, serving one request at a time may cause loss of
  4772. * throughput.
  4773. */
  4774. if (bfqd->strict_guarantees && bfqd->tot_rq_in_driver > 0)
  4775. goto exit;
  4776. bfqq = bfq_select_queue(bfqd);
  4777. if (!bfqq)
  4778. goto exit;
  4779. rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
  4780. if (rq) {
  4781. inc_in_driver_start_rq:
  4782. bfqd->rq_in_driver[bfqq->actuator_idx]++;
  4783. bfqd->tot_rq_in_driver++;
  4784. start_rq:
  4785. rq->rq_flags |= RQF_STARTED;
  4786. }
  4787. exit:
  4788. return rq;
  4789. }
  4790. #ifdef CONFIG_BFQ_CGROUP_DEBUG
  4791. static void bfq_update_dispatch_stats(struct request_queue *q,
  4792. struct request *rq,
  4793. struct bfq_queue *in_serv_queue,
  4794. bool idle_timer_disabled)
  4795. {
  4796. struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
  4797. if (!idle_timer_disabled && !bfqq)
  4798. return;
  4799. /*
  4800. * rq and bfqq are guaranteed to exist until this function
  4801. * ends, for the following reasons. First, rq can be
  4802. * dispatched to the device, and then can be completed and
  4803. * freed, only after this function ends. Second, rq cannot be
  4804. * merged (and thus freed because of a merge) any longer,
  4805. * because it has already started. Thus rq cannot be freed
  4806. * before this function ends, and, since rq has a reference to
  4807. * bfqq, the same guarantee holds for bfqq too.
  4808. *
  4809. * In addition, the following queue lock guarantees that
  4810. * bfqq_group(bfqq) exists as well.
  4811. */
  4812. spin_lock_irq(&q->queue_lock);
  4813. if (idle_timer_disabled)
  4814. /*
  4815. * Since the idle timer has been disabled,
  4816. * in_serv_queue contained some request when
  4817. * __bfq_dispatch_request was invoked above, which
  4818. * implies that rq was picked exactly from
  4819. * in_serv_queue. Thus in_serv_queue == bfqq, and is
  4820. * therefore guaranteed to exist because of the above
  4821. * arguments.
  4822. */
  4823. bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
  4824. if (bfqq) {
  4825. struct bfq_group *bfqg = bfqq_group(bfqq);
  4826. bfqg_stats_update_avg_queue_size(bfqg);
  4827. bfqg_stats_set_start_empty_time(bfqg);
  4828. bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
  4829. }
  4830. spin_unlock_irq(&q->queue_lock);
  4831. }
  4832. #else
  4833. static inline void bfq_update_dispatch_stats(struct request_queue *q,
  4834. struct request *rq,
  4835. struct bfq_queue *in_serv_queue,
  4836. bool idle_timer_disabled) {}
  4837. #endif /* CONFIG_BFQ_CGROUP_DEBUG */
  4838. static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
  4839. {
  4840. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4841. struct request *rq;
  4842. struct bfq_queue *in_serv_queue;
  4843. bool waiting_rq, idle_timer_disabled = false;
  4844. spin_lock_irq(&bfqd->lock);
  4845. in_serv_queue = bfqd->in_service_queue;
  4846. waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
  4847. rq = __bfq_dispatch_request(hctx);
  4848. if (in_serv_queue == bfqd->in_service_queue) {
  4849. idle_timer_disabled =
  4850. waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
  4851. }
  4852. spin_unlock_irq(&bfqd->lock);
  4853. bfq_update_dispatch_stats(hctx->queue, rq,
  4854. idle_timer_disabled ? in_serv_queue : NULL,
  4855. idle_timer_disabled);
  4856. return rq;
  4857. }
  4858. /*
  4859. * Task holds one reference to the queue, dropped when task exits. Each rq
  4860. * in-flight on this queue also holds a reference, dropped when rq is freed.
  4861. *
  4862. * Scheduler lock must be held here. Recall not to use bfqq after calling
  4863. * this function on it.
  4864. */
  4865. void bfq_put_queue(struct bfq_queue *bfqq)
  4866. {
  4867. struct bfq_queue *item;
  4868. struct hlist_node *n;
  4869. struct bfq_group *bfqg = bfqq_group(bfqq);
  4870. bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
  4871. bfqq->ref--;
  4872. if (bfqq->ref)
  4873. return;
  4874. if (!hlist_unhashed(&bfqq->burst_list_node)) {
  4875. hlist_del_init(&bfqq->burst_list_node);
  4876. /*
  4877. * Decrement also burst size after the removal, if the
  4878. * process associated with bfqq is exiting, and thus
  4879. * does not contribute to the burst any longer. This
  4880. * decrement helps filter out false positives of large
  4881. * bursts, when some short-lived process (often due to
  4882. * the execution of commands by some service) happens
  4883. * to start and exit while a complex application is
  4884. * starting, and thus spawning several processes that
  4885. * do I/O (and that *must not* be treated as a large
  4886. * burst, see comments on bfq_handle_burst).
  4887. *
  4888. * In particular, the decrement is performed only if:
  4889. * 1) bfqq is not a merged queue, because, if it is,
  4890. * then this free of bfqq is not triggered by the exit
  4891. * of the process bfqq is associated with, but exactly
  4892. * by the fact that bfqq has just been merged.
  4893. * 2) burst_size is greater than 0, to handle
  4894. * unbalanced decrements. Unbalanced decrements may
  4895. * happen in te following case: bfqq is inserted into
  4896. * the current burst list--without incrementing
  4897. * bust_size--because of a split, but the current
  4898. * burst list is not the burst list bfqq belonged to
  4899. * (see comments on the case of a split in
  4900. * bfq_set_request).
  4901. */
  4902. if (bfqq->bic && bfqq->bfqd->burst_size > 0)
  4903. bfqq->bfqd->burst_size--;
  4904. }
  4905. /*
  4906. * bfqq does not exist any longer, so it cannot be woken by
  4907. * any other queue, and cannot wake any other queue. Then bfqq
  4908. * must be removed from the woken list of its possible waker
  4909. * queue, and all queues in the woken list of bfqq must stop
  4910. * having a waker queue. Strictly speaking, these updates
  4911. * should be performed when bfqq remains with no I/O source
  4912. * attached to it, which happens before bfqq gets freed. In
  4913. * particular, this happens when the last process associated
  4914. * with bfqq exits or gets associated with a different
  4915. * queue. However, both events lead to bfqq being freed soon,
  4916. * and dangling references would come out only after bfqq gets
  4917. * freed. So these updates are done here, as a simple and safe
  4918. * way to handle all cases.
  4919. */
  4920. /* remove bfqq from woken list */
  4921. if (!hlist_unhashed(&bfqq->woken_list_node))
  4922. hlist_del_init(&bfqq->woken_list_node);
  4923. /* reset waker for all queues in woken list */
  4924. hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
  4925. woken_list_node) {
  4926. item->waker_bfqq = NULL;
  4927. hlist_del_init(&item->woken_list_node);
  4928. }
  4929. if (bfqq->bfqd->last_completed_rq_bfqq == bfqq)
  4930. bfqq->bfqd->last_completed_rq_bfqq = NULL;
  4931. WARN_ON_ONCE(!list_empty(&bfqq->fifo));
  4932. WARN_ON_ONCE(!RB_EMPTY_ROOT(&bfqq->sort_list));
  4933. WARN_ON_ONCE(bfqq->dispatched);
  4934. kmem_cache_free(bfq_pool, bfqq);
  4935. bfqg_and_blkg_put(bfqg);
  4936. }
  4937. static void bfq_put_stable_ref(struct bfq_queue *bfqq)
  4938. {
  4939. bfqq->stable_ref--;
  4940. bfq_put_queue(bfqq);
  4941. }
  4942. void bfq_put_cooperator(struct bfq_queue *bfqq)
  4943. {
  4944. struct bfq_queue *__bfqq, *next;
  4945. /*
  4946. * If this queue was scheduled to merge with another queue, be
  4947. * sure to drop the reference taken on that queue (and others in
  4948. * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
  4949. */
  4950. __bfqq = bfqq->new_bfqq;
  4951. while (__bfqq) {
  4952. next = __bfqq->new_bfqq;
  4953. bfq_put_queue(__bfqq);
  4954. __bfqq = next;
  4955. }
  4956. }
  4957. static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  4958. {
  4959. if (bfqq == bfqd->in_service_queue) {
  4960. __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
  4961. bfq_schedule_dispatch(bfqd);
  4962. }
  4963. bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
  4964. bfq_put_cooperator(bfqq);
  4965. bfq_release_process_ref(bfqd, bfqq);
  4966. }
  4967. static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
  4968. unsigned int actuator_idx)
  4969. {
  4970. struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, actuator_idx);
  4971. struct bfq_data *bfqd;
  4972. if (bfqq)
  4973. bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
  4974. if (bfqq && bfqd) {
  4975. bic_set_bfqq(bic, NULL, is_sync, actuator_idx);
  4976. bfq_exit_bfqq(bfqd, bfqq);
  4977. }
  4978. }
  4979. static void _bfq_exit_icq(struct bfq_io_cq *bic, unsigned int num_actuators)
  4980. {
  4981. struct bfq_iocq_bfqq_data *bfqq_data = bic->bfqq_data;
  4982. unsigned int act_idx;
  4983. for (act_idx = 0; act_idx < num_actuators; act_idx++) {
  4984. if (bfqq_data[act_idx].stable_merge_bfqq)
  4985. bfq_put_stable_ref(bfqq_data[act_idx].stable_merge_bfqq);
  4986. bfq_exit_icq_bfqq(bic, true, act_idx);
  4987. bfq_exit_icq_bfqq(bic, false, act_idx);
  4988. }
  4989. }
  4990. static void bfq_exit_icq(struct io_cq *icq)
  4991. {
  4992. struct bfq_io_cq *bic = icq_to_bic(icq);
  4993. struct bfq_data *bfqd = bic_to_bfqd(bic);
  4994. unsigned long flags;
  4995. /*
  4996. * If bfqd and thus bfqd->num_actuators is not available any
  4997. * longer, then cycle over all possible per-actuator bfqqs in
  4998. * next loop. We rely on bic being zeroed on creation, and
  4999. * therefore on its unused per-actuator fields being NULL.
  5000. *
  5001. * bfqd is NULL if scheduler already exited, and in that case
  5002. * this is the last time these queues are accessed.
  5003. */
  5004. if (bfqd) {
  5005. spin_lock_irqsave(&bfqd->lock, flags);
  5006. _bfq_exit_icq(bic, bfqd->num_actuators);
  5007. spin_unlock_irqrestore(&bfqd->lock, flags);
  5008. } else {
  5009. _bfq_exit_icq(bic, BFQ_MAX_ACTUATORS);
  5010. }
  5011. }
  5012. /*
  5013. * Update the entity prio values; note that the new values will not
  5014. * be used until the next (re)activation.
  5015. */
  5016. static void
  5017. bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
  5018. {
  5019. struct task_struct *tsk = current;
  5020. int ioprio_class;
  5021. struct bfq_data *bfqd = bfqq->bfqd;
  5022. if (!bfqd)
  5023. return;
  5024. ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
  5025. switch (ioprio_class) {
  5026. default:
  5027. pr_err("bdi %s: bfq: bad prio class %d\n",
  5028. bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
  5029. ioprio_class);
  5030. fallthrough;
  5031. case IOPRIO_CLASS_NONE:
  5032. /*
  5033. * No prio set, inherit CPU scheduling settings.
  5034. */
  5035. bfqq->new_ioprio = task_nice_ioprio(tsk);
  5036. bfqq->new_ioprio_class = task_nice_ioclass(tsk);
  5037. break;
  5038. case IOPRIO_CLASS_RT:
  5039. bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
  5040. bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
  5041. break;
  5042. case IOPRIO_CLASS_BE:
  5043. bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
  5044. bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
  5045. break;
  5046. case IOPRIO_CLASS_IDLE:
  5047. bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
  5048. bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
  5049. break;
  5050. }
  5051. if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) {
  5052. pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
  5053. bfqq->new_ioprio);
  5054. bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
  5055. }
  5056. bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
  5057. bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d",
  5058. bfqq->new_ioprio, bfqq->entity.new_weight);
  5059. bfqq->entity.prio_changed = 1;
  5060. }
  5061. static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
  5062. struct bio *bio, bool is_sync,
  5063. struct bfq_io_cq *bic,
  5064. bool respawn);
  5065. static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
  5066. {
  5067. struct bfq_data *bfqd = bic_to_bfqd(bic);
  5068. struct bfq_queue *bfqq;
  5069. int ioprio = bic->icq.ioc->ioprio;
  5070. /*
  5071. * This condition may trigger on a newly created bic, be sure to
  5072. * drop the lock before returning.
  5073. */
  5074. if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
  5075. return;
  5076. bic->ioprio = ioprio;
  5077. bfqq = bic_to_bfqq(bic, false, bfq_actuator_index(bfqd, bio));
  5078. if (bfqq) {
  5079. struct bfq_queue *old_bfqq = bfqq;
  5080. bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
  5081. bic_set_bfqq(bic, bfqq, false, bfq_actuator_index(bfqd, bio));
  5082. bfq_release_process_ref(bfqd, old_bfqq);
  5083. }
  5084. bfqq = bic_to_bfqq(bic, true, bfq_actuator_index(bfqd, bio));
  5085. if (bfqq)
  5086. bfq_set_next_ioprio_data(bfqq, bic);
  5087. }
  5088. static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5089. struct bfq_io_cq *bic, pid_t pid, int is_sync,
  5090. unsigned int act_idx)
  5091. {
  5092. u64 now_ns = blk_time_get_ns();
  5093. bfqq->actuator_idx = act_idx;
  5094. RB_CLEAR_NODE(&bfqq->entity.rb_node);
  5095. INIT_LIST_HEAD(&bfqq->fifo);
  5096. INIT_HLIST_NODE(&bfqq->burst_list_node);
  5097. INIT_HLIST_NODE(&bfqq->woken_list_node);
  5098. INIT_HLIST_HEAD(&bfqq->woken_list);
  5099. bfqq->ref = 0;
  5100. bfqq->bfqd = bfqd;
  5101. if (bic)
  5102. bfq_set_next_ioprio_data(bfqq, bic);
  5103. if (is_sync) {
  5104. /*
  5105. * No need to mark as has_short_ttime if in
  5106. * idle_class, because no device idling is performed
  5107. * for queues in idle class
  5108. */
  5109. if (!bfq_class_idle(bfqq))
  5110. /* tentatively mark as has_short_ttime */
  5111. bfq_mark_bfqq_has_short_ttime(bfqq);
  5112. bfq_mark_bfqq_sync(bfqq);
  5113. bfq_mark_bfqq_just_created(bfqq);
  5114. } else
  5115. bfq_clear_bfqq_sync(bfqq);
  5116. /* set end request to minus infinity from now */
  5117. bfqq->ttime.last_end_request = now_ns + 1;
  5118. bfqq->creation_time = jiffies;
  5119. bfqq->io_start_time = now_ns;
  5120. bfq_mark_bfqq_IO_bound(bfqq);
  5121. bfqq->pid = pid;
  5122. /* Tentative initial value to trade off between thr and lat */
  5123. bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
  5124. bfqq->budget_timeout = bfq_smallest_from_now();
  5125. bfqq->wr_coeff = 1;
  5126. bfqq->last_wr_start_finish = jiffies;
  5127. bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
  5128. bfqq->split_time = bfq_smallest_from_now();
  5129. /*
  5130. * To not forget the possibly high bandwidth consumed by a
  5131. * process/queue in the recent past,
  5132. * bfq_bfqq_softrt_next_start() returns a value at least equal
  5133. * to the current value of bfqq->soft_rt_next_start (see
  5134. * comments on bfq_bfqq_softrt_next_start). Set
  5135. * soft_rt_next_start to now, to mean that bfqq has consumed
  5136. * no bandwidth so far.
  5137. */
  5138. bfqq->soft_rt_next_start = jiffies;
  5139. /* first request is almost certainly seeky */
  5140. bfqq->seek_history = 1;
  5141. bfqq->decrease_time_jif = jiffies;
  5142. }
  5143. static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
  5144. struct bfq_group *bfqg,
  5145. int ioprio_class, int ioprio, int act_idx)
  5146. {
  5147. switch (ioprio_class) {
  5148. case IOPRIO_CLASS_RT:
  5149. return &bfqg->async_bfqq[0][ioprio][act_idx];
  5150. case IOPRIO_CLASS_NONE:
  5151. ioprio = IOPRIO_BE_NORM;
  5152. fallthrough;
  5153. case IOPRIO_CLASS_BE:
  5154. return &bfqg->async_bfqq[1][ioprio][act_idx];
  5155. case IOPRIO_CLASS_IDLE:
  5156. return &bfqg->async_idle_bfqq[act_idx];
  5157. default:
  5158. return NULL;
  5159. }
  5160. }
  5161. static struct bfq_queue *
  5162. bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5163. struct bfq_io_cq *bic,
  5164. struct bfq_queue *last_bfqq_created)
  5165. {
  5166. unsigned int a_idx = last_bfqq_created->actuator_idx;
  5167. struct bfq_queue *new_bfqq =
  5168. bfq_setup_merge(bfqq, last_bfqq_created);
  5169. if (!new_bfqq)
  5170. return bfqq;
  5171. if (new_bfqq->bic)
  5172. new_bfqq->bic->bfqq_data[a_idx].stably_merged = true;
  5173. bic->bfqq_data[a_idx].stably_merged = true;
  5174. /*
  5175. * Reusing merge functions. This implies that
  5176. * bfqq->bic must be set too, for
  5177. * bfq_merge_bfqqs to correctly save bfqq's
  5178. * state before killing it.
  5179. */
  5180. bfqq->bic = bic;
  5181. return bfq_merge_bfqqs(bfqd, bic, bfqq);
  5182. }
  5183. /*
  5184. * Many throughput-sensitive workloads are made of several parallel
  5185. * I/O flows, with all flows generated by the same application, or
  5186. * more generically by the same task (e.g., system boot). The most
  5187. * counterproductive action with these workloads is plugging I/O
  5188. * dispatch when one of the bfq_queues associated with these flows
  5189. * remains temporarily empty.
  5190. *
  5191. * To avoid this plugging, BFQ has been using a burst-handling
  5192. * mechanism for years now. This mechanism has proven effective for
  5193. * throughput, and not detrimental for service guarantees. The
  5194. * following function pushes this mechanism a little bit further,
  5195. * basing on the following two facts.
  5196. *
  5197. * First, all the I/O flows of a the same application or task
  5198. * contribute to the execution/completion of that common application
  5199. * or task. So the performance figures that matter are total
  5200. * throughput of the flows and task-wide I/O latency. In particular,
  5201. * these flows do not need to be protected from each other, in terms
  5202. * of individual bandwidth or latency.
  5203. *
  5204. * Second, the above fact holds regardless of the number of flows.
  5205. *
  5206. * Putting these two facts together, this commits merges stably the
  5207. * bfq_queues associated with these I/O flows, i.e., with the
  5208. * processes that generate these IO/ flows, regardless of how many the
  5209. * involved processes are.
  5210. *
  5211. * To decide whether a set of bfq_queues is actually associated with
  5212. * the I/O flows of a common application or task, and to merge these
  5213. * queues stably, this function operates as follows: given a bfq_queue,
  5214. * say Q2, currently being created, and the last bfq_queue, say Q1,
  5215. * created before Q2, Q2 is merged stably with Q1 if
  5216. * - very little time has elapsed since when Q1 was created
  5217. * - Q2 has the same ioprio as Q1
  5218. * - Q2 belongs to the same group as Q1
  5219. *
  5220. * Merging bfq_queues also reduces scheduling overhead. A fio test
  5221. * with ten random readers on /dev/nullb shows a throughput boost of
  5222. * 40%, with a quadcore. Since BFQ's execution time amounts to ~50% of
  5223. * the total per-request processing time, the above throughput boost
  5224. * implies that BFQ's overhead is reduced by more than 50%.
  5225. *
  5226. * This new mechanism most certainly obsoletes the current
  5227. * burst-handling heuristics. We keep those heuristics for the moment.
  5228. */
  5229. static struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd,
  5230. struct bfq_queue *bfqq,
  5231. struct bfq_io_cq *bic)
  5232. {
  5233. struct bfq_queue **source_bfqq = bfqq->entity.parent ?
  5234. &bfqq->entity.parent->last_bfqq_created :
  5235. &bfqd->last_bfqq_created;
  5236. struct bfq_queue *last_bfqq_created = *source_bfqq;
  5237. /*
  5238. * If last_bfqq_created has not been set yet, then init it. If
  5239. * it has been set already, but too long ago, then move it
  5240. * forward to bfqq. Finally, move also if bfqq belongs to a
  5241. * different group than last_bfqq_created, or if bfqq has a
  5242. * different ioprio, ioprio_class or actuator_idx. If none of
  5243. * these conditions holds true, then try an early stable merge
  5244. * or schedule a delayed stable merge. As for the condition on
  5245. * actuator_idx, the reason is that, if queues associated with
  5246. * different actuators are merged, then control is lost on
  5247. * each actuator. Therefore some actuator may be
  5248. * underutilized, and throughput may decrease.
  5249. *
  5250. * A delayed merge is scheduled (instead of performing an
  5251. * early merge), in case bfqq might soon prove to be more
  5252. * throughput-beneficial if not merged. Currently this is
  5253. * possible only if bfqd is rotational with no queueing. For
  5254. * such a drive, not merging bfqq is better for throughput if
  5255. * bfqq happens to contain sequential I/O. So, we wait a
  5256. * little bit for enough I/O to flow through bfqq. After that,
  5257. * if such an I/O is sequential, then the merge is
  5258. * canceled. Otherwise the merge is finally performed.
  5259. */
  5260. if (!last_bfqq_created ||
  5261. time_before(last_bfqq_created->creation_time +
  5262. msecs_to_jiffies(bfq_activation_stable_merging),
  5263. bfqq->creation_time) ||
  5264. bfqq->entity.parent != last_bfqq_created->entity.parent ||
  5265. bfqq->ioprio != last_bfqq_created->ioprio ||
  5266. bfqq->ioprio_class != last_bfqq_created->ioprio_class ||
  5267. bfqq->actuator_idx != last_bfqq_created->actuator_idx)
  5268. *source_bfqq = bfqq;
  5269. else if (time_after_eq(last_bfqq_created->creation_time +
  5270. bfqd->bfq_burst_interval,
  5271. bfqq->creation_time)) {
  5272. if (likely(bfqd->nonrot_with_queueing))
  5273. /*
  5274. * With this type of drive, leaving
  5275. * bfqq alone may provide no
  5276. * throughput benefits compared with
  5277. * merging bfqq. So merge bfqq now.
  5278. */
  5279. bfqq = bfq_do_early_stable_merge(bfqd, bfqq,
  5280. bic,
  5281. last_bfqq_created);
  5282. else { /* schedule tentative stable merge */
  5283. /*
  5284. * get reference on last_bfqq_created,
  5285. * to prevent it from being freed,
  5286. * until we decide whether to merge
  5287. */
  5288. last_bfqq_created->ref++;
  5289. /*
  5290. * need to keep track of stable refs, to
  5291. * compute process refs correctly
  5292. */
  5293. last_bfqq_created->stable_ref++;
  5294. /*
  5295. * Record the bfqq to merge to.
  5296. */
  5297. bic->bfqq_data[last_bfqq_created->actuator_idx].stable_merge_bfqq =
  5298. last_bfqq_created;
  5299. }
  5300. }
  5301. return bfqq;
  5302. }
  5303. static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
  5304. struct bio *bio, bool is_sync,
  5305. struct bfq_io_cq *bic,
  5306. bool respawn)
  5307. {
  5308. const int ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
  5309. const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
  5310. struct bfq_queue **async_bfqq = NULL;
  5311. struct bfq_queue *bfqq;
  5312. struct bfq_group *bfqg;
  5313. bfqg = bfq_bio_bfqg(bfqd, bio);
  5314. if (!is_sync) {
  5315. async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
  5316. ioprio,
  5317. bfq_actuator_index(bfqd, bio));
  5318. bfqq = *async_bfqq;
  5319. if (bfqq)
  5320. goto out;
  5321. }
  5322. bfqq = kmem_cache_alloc_node(bfq_pool,
  5323. GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
  5324. bfqd->queue->node);
  5325. if (bfqq) {
  5326. bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
  5327. is_sync, bfq_actuator_index(bfqd, bio));
  5328. bfq_init_entity(&bfqq->entity, bfqg);
  5329. bfq_log_bfqq(bfqd, bfqq, "allocated");
  5330. } else {
  5331. bfqq = &bfqd->oom_bfqq;
  5332. bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
  5333. goto out;
  5334. }
  5335. /*
  5336. * Pin the queue now that it's allocated, scheduler exit will
  5337. * prune it.
  5338. */
  5339. if (async_bfqq) {
  5340. bfqq->ref++; /*
  5341. * Extra group reference, w.r.t. sync
  5342. * queue. This extra reference is removed
  5343. * only if bfqq->bfqg disappears, to
  5344. * guarantee that this queue is not freed
  5345. * until its group goes away.
  5346. */
  5347. bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
  5348. bfqq, bfqq->ref);
  5349. *async_bfqq = bfqq;
  5350. }
  5351. out:
  5352. bfqq->ref++; /* get a process reference to this queue */
  5353. if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn)
  5354. bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic);
  5355. return bfqq;
  5356. }
  5357. static void bfq_update_io_thinktime(struct bfq_data *bfqd,
  5358. struct bfq_queue *bfqq)
  5359. {
  5360. struct bfq_ttime *ttime = &bfqq->ttime;
  5361. u64 elapsed;
  5362. /*
  5363. * We are really interested in how long it takes for the queue to
  5364. * become busy when there is no outstanding IO for this queue. So
  5365. * ignore cases when the bfq queue has already IO queued.
  5366. */
  5367. if (bfqq->dispatched || bfq_bfqq_busy(bfqq))
  5368. return;
  5369. elapsed = blk_time_get_ns() - bfqq->ttime.last_end_request;
  5370. elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
  5371. ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
  5372. ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
  5373. ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
  5374. ttime->ttime_samples);
  5375. }
  5376. static void
  5377. bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5378. struct request *rq)
  5379. {
  5380. bfqq->seek_history <<= 1;
  5381. bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
  5382. if (bfqq->wr_coeff > 1 &&
  5383. bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  5384. BFQQ_TOTALLY_SEEKY(bfqq)) {
  5385. if (time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
  5386. bfq_wr_duration(bfqd))) {
  5387. /*
  5388. * In soft_rt weight raising with the
  5389. * interactive-weight-raising period
  5390. * elapsed (so no switch back to
  5391. * interactive weight raising).
  5392. */
  5393. bfq_bfqq_end_wr(bfqq);
  5394. } else { /*
  5395. * stopping soft_rt weight raising
  5396. * while still in interactive period,
  5397. * switch back to interactive weight
  5398. * raising
  5399. */
  5400. switch_back_to_interactive_wr(bfqq, bfqd);
  5401. bfqq->entity.prio_changed = 1;
  5402. }
  5403. }
  5404. }
  5405. static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
  5406. struct bfq_queue *bfqq,
  5407. struct bfq_io_cq *bic)
  5408. {
  5409. bool has_short_ttime = true, state_changed;
  5410. /*
  5411. * No need to update has_short_ttime if bfqq is async or in
  5412. * idle io prio class, or if bfq_slice_idle is zero, because
  5413. * no device idling is performed for bfqq in this case.
  5414. */
  5415. if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
  5416. bfqd->bfq_slice_idle == 0)
  5417. return;
  5418. /* Idle window just restored, statistics are meaningless. */
  5419. if (time_is_after_eq_jiffies(bfqq->split_time +
  5420. bfqd->bfq_wr_min_idle_time))
  5421. return;
  5422. /* Think time is infinite if no process is linked to
  5423. * bfqq. Otherwise check average think time to decide whether
  5424. * to mark as has_short_ttime. To this goal, compare average
  5425. * think time with half the I/O-plugging timeout.
  5426. */
  5427. if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
  5428. (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
  5429. bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1))
  5430. has_short_ttime = false;
  5431. state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
  5432. if (has_short_ttime)
  5433. bfq_mark_bfqq_has_short_ttime(bfqq);
  5434. else
  5435. bfq_clear_bfqq_has_short_ttime(bfqq);
  5436. /*
  5437. * Until the base value for the total service time gets
  5438. * finally computed for bfqq, the inject limit does depend on
  5439. * the think-time state (short|long). In particular, the limit
  5440. * is 0 or 1 if the think time is deemed, respectively, as
  5441. * short or long (details in the comments in
  5442. * bfq_update_inject_limit()). Accordingly, the next
  5443. * instructions reset the inject limit if the think-time state
  5444. * has changed and the above base value is still to be
  5445. * computed.
  5446. *
  5447. * However, the reset is performed only if more than 100 ms
  5448. * have elapsed since the last update of the inject limit, or
  5449. * (inclusive) if the change is from short to long think
  5450. * time. The reason for this waiting is as follows.
  5451. *
  5452. * bfqq may have a long think time because of a
  5453. * synchronization with some other queue, i.e., because the
  5454. * I/O of some other queue may need to be completed for bfqq
  5455. * to receive new I/O. Details in the comments on the choice
  5456. * of the queue for injection in bfq_select_queue().
  5457. *
  5458. * As stressed in those comments, if such a synchronization is
  5459. * actually in place, then, without injection on bfqq, the
  5460. * blocking I/O cannot happen to served while bfqq is in
  5461. * service. As a consequence, if bfqq is granted
  5462. * I/O-dispatch-plugging, then bfqq remains empty, and no I/O
  5463. * is dispatched, until the idle timeout fires. This is likely
  5464. * to result in lower bandwidth and higher latencies for bfqq,
  5465. * and in a severe loss of total throughput.
  5466. *
  5467. * On the opposite end, a non-zero inject limit may allow the
  5468. * I/O that blocks bfqq to be executed soon, and therefore
  5469. * bfqq to receive new I/O soon.
  5470. *
  5471. * But, if the blocking gets actually eliminated, then the
  5472. * next think-time sample for bfqq may be very low. This in
  5473. * turn may cause bfqq's think time to be deemed
  5474. * short. Without the 100 ms barrier, this new state change
  5475. * would cause the body of the next if to be executed
  5476. * immediately. But this would set to 0 the inject
  5477. * limit. Without injection, the blocking I/O would cause the
  5478. * think time of bfqq to become long again, and therefore the
  5479. * inject limit to be raised again, and so on. The only effect
  5480. * of such a steady oscillation between the two think-time
  5481. * states would be to prevent effective injection on bfqq.
  5482. *
  5483. * In contrast, if the inject limit is not reset during such a
  5484. * long time interval as 100 ms, then the number of short
  5485. * think time samples can grow significantly before the reset
  5486. * is performed. As a consequence, the think time state can
  5487. * become stable before the reset. Therefore there will be no
  5488. * state change when the 100 ms elapse, and no reset of the
  5489. * inject limit. The inject limit remains steadily equal to 1
  5490. * both during and after the 100 ms. So injection can be
  5491. * performed at all times, and throughput gets boosted.
  5492. *
  5493. * An inject limit equal to 1 is however in conflict, in
  5494. * general, with the fact that the think time of bfqq is
  5495. * short, because injection may be likely to delay bfqq's I/O
  5496. * (as explained in the comments in
  5497. * bfq_update_inject_limit()). But this does not happen in
  5498. * this special case, because bfqq's low think time is due to
  5499. * an effective handling of a synchronization, through
  5500. * injection. In this special case, bfqq's I/O does not get
  5501. * delayed by injection; on the contrary, bfqq's I/O is
  5502. * brought forward, because it is not blocked for
  5503. * milliseconds.
  5504. *
  5505. * In addition, serving the blocking I/O much sooner, and much
  5506. * more frequently than once per I/O-plugging timeout, makes
  5507. * it much quicker to detect a waker queue (the concept of
  5508. * waker queue is defined in the comments in
  5509. * bfq_add_request()). This makes it possible to start sooner
  5510. * to boost throughput more effectively, by injecting the I/O
  5511. * of the waker queue unconditionally on every
  5512. * bfq_dispatch_request().
  5513. *
  5514. * One last, important benefit of not resetting the inject
  5515. * limit before 100 ms is that, during this time interval, the
  5516. * base value for the total service time is likely to get
  5517. * finally computed for bfqq, freeing the inject limit from
  5518. * its relation with the think time.
  5519. */
  5520. if (state_changed && bfqq->last_serv_time_ns == 0 &&
  5521. (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  5522. msecs_to_jiffies(100)) ||
  5523. !has_short_ttime))
  5524. bfq_reset_inject_limit(bfqd, bfqq);
  5525. }
  5526. /*
  5527. * Called when a new fs request (rq) is added to bfqq. Check if there's
  5528. * something we should do about it.
  5529. */
  5530. static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5531. struct request *rq)
  5532. {
  5533. if (rq->cmd_flags & REQ_META)
  5534. bfqq->meta_pending++;
  5535. bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
  5536. if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
  5537. bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
  5538. blk_rq_sectors(rq) < 32;
  5539. bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
  5540. /*
  5541. * There is just this request queued: if
  5542. * - the request is small, and
  5543. * - we are idling to boost throughput, and
  5544. * - the queue is not to be expired,
  5545. * then just exit.
  5546. *
  5547. * In this way, if the device is being idled to wait
  5548. * for a new request from the in-service queue, we
  5549. * avoid unplugging the device and committing the
  5550. * device to serve just a small request. In contrast
  5551. * we wait for the block layer to decide when to
  5552. * unplug the device: hopefully, new requests will be
  5553. * merged to this one quickly, then the device will be
  5554. * unplugged and larger requests will be dispatched.
  5555. */
  5556. if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
  5557. !budget_timeout)
  5558. return;
  5559. /*
  5560. * A large enough request arrived, or idling is being
  5561. * performed to preserve service guarantees, or
  5562. * finally the queue is to be expired: in all these
  5563. * cases disk idling is to be stopped, so clear
  5564. * wait_request flag and reset timer.
  5565. */
  5566. bfq_clear_bfqq_wait_request(bfqq);
  5567. hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
  5568. /*
  5569. * The queue is not empty, because a new request just
  5570. * arrived. Hence we can safely expire the queue, in
  5571. * case of budget timeout, without risking that the
  5572. * timestamps of the queue are not updated correctly.
  5573. * See [1] for more details.
  5574. */
  5575. if (budget_timeout)
  5576. bfq_bfqq_expire(bfqd, bfqq, false,
  5577. BFQQE_BUDGET_TIMEOUT);
  5578. }
  5579. }
  5580. static void bfqq_request_allocated(struct bfq_queue *bfqq)
  5581. {
  5582. struct bfq_entity *entity = &bfqq->entity;
  5583. for_each_entity(entity)
  5584. entity->allocated++;
  5585. }
  5586. static void bfqq_request_freed(struct bfq_queue *bfqq)
  5587. {
  5588. struct bfq_entity *entity = &bfqq->entity;
  5589. for_each_entity(entity)
  5590. entity->allocated--;
  5591. }
  5592. /* returns true if it causes the idle timer to be disabled */
  5593. static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
  5594. {
  5595. struct bfq_queue *bfqq = RQ_BFQQ(rq),
  5596. *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
  5597. RQ_BIC(rq));
  5598. bool waiting, idle_timer_disabled = false;
  5599. if (new_bfqq) {
  5600. struct bfq_queue *old_bfqq = bfqq;
  5601. /*
  5602. * Release the request's reference to the old bfqq
  5603. * and make sure one is taken to the shared queue.
  5604. */
  5605. bfqq_request_allocated(new_bfqq);
  5606. bfqq_request_freed(bfqq);
  5607. new_bfqq->ref++;
  5608. /*
  5609. * If the bic associated with the process
  5610. * issuing this request still points to bfqq
  5611. * (and thus has not been already redirected
  5612. * to new_bfqq or even some other bfq_queue),
  5613. * then complete the merge and redirect it to
  5614. * new_bfqq.
  5615. */
  5616. if (bic_to_bfqq(RQ_BIC(rq), true,
  5617. bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
  5618. while (bfqq != new_bfqq)
  5619. bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
  5620. }
  5621. bfq_clear_bfqq_just_created(old_bfqq);
  5622. /*
  5623. * rq is about to be enqueued into new_bfqq,
  5624. * release rq reference on bfqq
  5625. */
  5626. bfq_put_queue(old_bfqq);
  5627. rq->elv.priv[1] = new_bfqq;
  5628. }
  5629. bfq_update_io_thinktime(bfqd, bfqq);
  5630. bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
  5631. bfq_update_io_seektime(bfqd, bfqq, rq);
  5632. waiting = bfqq && bfq_bfqq_wait_request(bfqq);
  5633. bfq_add_request(rq);
  5634. idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
  5635. rq->fifo_time = blk_time_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
  5636. list_add_tail(&rq->queuelist, &bfqq->fifo);
  5637. bfq_rq_enqueued(bfqd, bfqq, rq);
  5638. return idle_timer_disabled;
  5639. }
  5640. #ifdef CONFIG_BFQ_CGROUP_DEBUG
  5641. static void bfq_update_insert_stats(struct request_queue *q,
  5642. struct bfq_queue *bfqq,
  5643. bool idle_timer_disabled,
  5644. blk_opf_t cmd_flags)
  5645. {
  5646. if (!bfqq)
  5647. return;
  5648. /*
  5649. * bfqq still exists, because it can disappear only after
  5650. * either it is merged with another queue, or the process it
  5651. * is associated with exits. But both actions must be taken by
  5652. * the same process currently executing this flow of
  5653. * instructions.
  5654. *
  5655. * In addition, the following queue lock guarantees that
  5656. * bfqq_group(bfqq) exists as well.
  5657. */
  5658. spin_lock_irq(&q->queue_lock);
  5659. bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
  5660. if (idle_timer_disabled)
  5661. bfqg_stats_update_idle_time(bfqq_group(bfqq));
  5662. spin_unlock_irq(&q->queue_lock);
  5663. }
  5664. #else
  5665. static inline void bfq_update_insert_stats(struct request_queue *q,
  5666. struct bfq_queue *bfqq,
  5667. bool idle_timer_disabled,
  5668. blk_opf_t cmd_flags) {}
  5669. #endif /* CONFIG_BFQ_CGROUP_DEBUG */
  5670. static struct bfq_queue *bfq_init_rq(struct request *rq);
  5671. static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  5672. blk_insert_t flags)
  5673. {
  5674. struct request_queue *q = hctx->queue;
  5675. struct bfq_data *bfqd = q->elevator->elevator_data;
  5676. struct bfq_queue *bfqq;
  5677. bool idle_timer_disabled = false;
  5678. blk_opf_t cmd_flags;
  5679. LIST_HEAD(free);
  5680. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  5681. if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
  5682. bfqg_stats_update_legacy_io(q, rq);
  5683. #endif
  5684. spin_lock_irq(&bfqd->lock);
  5685. bfqq = bfq_init_rq(rq);
  5686. if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
  5687. spin_unlock_irq(&bfqd->lock);
  5688. blk_mq_free_requests(&free);
  5689. return;
  5690. }
  5691. trace_block_rq_insert(rq);
  5692. if (flags & BLK_MQ_INSERT_AT_HEAD) {
  5693. list_add(&rq->queuelist, &bfqd->dispatch);
  5694. } else if (!bfqq) {
  5695. list_add_tail(&rq->queuelist, &bfqd->dispatch);
  5696. } else {
  5697. idle_timer_disabled = __bfq_insert_request(bfqd, rq);
  5698. /*
  5699. * Update bfqq, because, if a queue merge has occurred
  5700. * in __bfq_insert_request, then rq has been
  5701. * redirected into a new queue.
  5702. */
  5703. bfqq = RQ_BFQQ(rq);
  5704. if (rq_mergeable(rq)) {
  5705. elv_rqhash_add(q, rq);
  5706. if (!q->last_merge)
  5707. q->last_merge = rq;
  5708. }
  5709. }
  5710. /*
  5711. * Cache cmd_flags before releasing scheduler lock, because rq
  5712. * may disappear afterwards (for example, because of a request
  5713. * merge).
  5714. */
  5715. cmd_flags = rq->cmd_flags;
  5716. spin_unlock_irq(&bfqd->lock);
  5717. bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
  5718. cmd_flags);
  5719. }
  5720. static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
  5721. struct list_head *list,
  5722. blk_insert_t flags)
  5723. {
  5724. while (!list_empty(list)) {
  5725. struct request *rq;
  5726. rq = list_first_entry(list, struct request, queuelist);
  5727. list_del_init(&rq->queuelist);
  5728. bfq_insert_request(hctx, rq, flags);
  5729. }
  5730. }
  5731. static void bfq_update_hw_tag(struct bfq_data *bfqd)
  5732. {
  5733. struct bfq_queue *bfqq = bfqd->in_service_queue;
  5734. bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
  5735. bfqd->tot_rq_in_driver);
  5736. if (bfqd->hw_tag == 1)
  5737. return;
  5738. /*
  5739. * This sample is valid if the number of outstanding requests
  5740. * is large enough to allow a queueing behavior. Note that the
  5741. * sum is not exact, as it's not taking into account deactivated
  5742. * requests.
  5743. */
  5744. if (bfqd->tot_rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
  5745. return;
  5746. /*
  5747. * If active queue hasn't enough requests and can idle, bfq might not
  5748. * dispatch sufficient requests to hardware. Don't zero hw_tag in this
  5749. * case
  5750. */
  5751. if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
  5752. bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
  5753. BFQ_HW_QUEUE_THRESHOLD &&
  5754. bfqd->tot_rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
  5755. return;
  5756. if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
  5757. return;
  5758. bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
  5759. bfqd->max_rq_in_driver = 0;
  5760. bfqd->hw_tag_samples = 0;
  5761. bfqd->nonrot_with_queueing =
  5762. blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
  5763. }
  5764. static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
  5765. {
  5766. u64 now_ns;
  5767. u32 delta_us;
  5768. bfq_update_hw_tag(bfqd);
  5769. bfqd->rq_in_driver[bfqq->actuator_idx]--;
  5770. bfqd->tot_rq_in_driver--;
  5771. bfqq->dispatched--;
  5772. if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
  5773. /*
  5774. * Set budget_timeout (which we overload to store the
  5775. * time at which the queue remains with no backlog and
  5776. * no outstanding request; used by the weight-raising
  5777. * mechanism).
  5778. */
  5779. bfqq->budget_timeout = jiffies;
  5780. bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
  5781. bfq_weights_tree_remove(bfqq);
  5782. }
  5783. now_ns = blk_time_get_ns();
  5784. bfqq->ttime.last_end_request = now_ns;
  5785. /*
  5786. * Using us instead of ns, to get a reasonable precision in
  5787. * computing rate in next check.
  5788. */
  5789. delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
  5790. /*
  5791. * If the request took rather long to complete, and, according
  5792. * to the maximum request size recorded, this completion latency
  5793. * implies that the request was certainly served at a very low
  5794. * rate (less than 1M sectors/sec), then the whole observation
  5795. * interval that lasts up to this time instant cannot be a
  5796. * valid time interval for computing a new peak rate. Invoke
  5797. * bfq_update_rate_reset to have the following three steps
  5798. * taken:
  5799. * - close the observation interval at the last (previous)
  5800. * request dispatch or completion
  5801. * - compute rate, if possible, for that observation interval
  5802. * - reset to zero samples, which will trigger a proper
  5803. * re-initialization of the observation interval on next
  5804. * dispatch
  5805. */
  5806. if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
  5807. (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
  5808. 1UL<<(BFQ_RATE_SHIFT - 10))
  5809. bfq_update_rate_reset(bfqd, NULL);
  5810. bfqd->last_completion = now_ns;
  5811. /*
  5812. * Shared queues are likely to receive I/O at a high
  5813. * rate. This may deceptively let them be considered as wakers
  5814. * of other queues. But a false waker will unjustly steal
  5815. * bandwidth to its supposedly woken queue. So considering
  5816. * also shared queues in the waking mechanism may cause more
  5817. * control troubles than throughput benefits. Then reset
  5818. * last_completed_rq_bfqq if bfqq is a shared queue.
  5819. */
  5820. if (!bfq_bfqq_coop(bfqq))
  5821. bfqd->last_completed_rq_bfqq = bfqq;
  5822. else
  5823. bfqd->last_completed_rq_bfqq = NULL;
  5824. /*
  5825. * If we are waiting to discover whether the request pattern
  5826. * of the task associated with the queue is actually
  5827. * isochronous, and both requisites for this condition to hold
  5828. * are now satisfied, then compute soft_rt_next_start (see the
  5829. * comments on the function bfq_bfqq_softrt_next_start()). We
  5830. * do not compute soft_rt_next_start if bfqq is in interactive
  5831. * weight raising (see the comments in bfq_bfqq_expire() for
  5832. * an explanation). We schedule this delayed update when bfqq
  5833. * expires, if it still has in-flight requests.
  5834. */
  5835. if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
  5836. RB_EMPTY_ROOT(&bfqq->sort_list) &&
  5837. bfqq->wr_coeff != bfqd->bfq_wr_coeff)
  5838. bfqq->soft_rt_next_start =
  5839. bfq_bfqq_softrt_next_start(bfqd, bfqq);
  5840. /*
  5841. * If this is the in-service queue, check if it needs to be expired,
  5842. * or if we want to idle in case it has no pending requests.
  5843. */
  5844. if (bfqd->in_service_queue == bfqq) {
  5845. if (bfq_bfqq_must_idle(bfqq)) {
  5846. if (bfqq->dispatched == 0)
  5847. bfq_arm_slice_timer(bfqd);
  5848. /*
  5849. * If we get here, we do not expire bfqq, even
  5850. * if bfqq was in budget timeout or had no
  5851. * more requests (as controlled in the next
  5852. * conditional instructions). The reason for
  5853. * not expiring bfqq is as follows.
  5854. *
  5855. * Here bfqq->dispatched > 0 holds, but
  5856. * bfq_bfqq_must_idle() returned true. This
  5857. * implies that, even if no request arrives
  5858. * for bfqq before bfqq->dispatched reaches 0,
  5859. * bfqq will, however, not be expired on the
  5860. * completion event that causes bfqq->dispatch
  5861. * to reach zero. In contrast, on this event,
  5862. * bfqq will start enjoying device idling
  5863. * (I/O-dispatch plugging).
  5864. *
  5865. * But, if we expired bfqq here, bfqq would
  5866. * not have the chance to enjoy device idling
  5867. * when bfqq->dispatched finally reaches
  5868. * zero. This would expose bfqq to violation
  5869. * of its reserved service guarantees.
  5870. */
  5871. return;
  5872. } else if (bfq_may_expire_for_budg_timeout(bfqq))
  5873. bfq_bfqq_expire(bfqd, bfqq, false,
  5874. BFQQE_BUDGET_TIMEOUT);
  5875. else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
  5876. (bfqq->dispatched == 0 ||
  5877. !bfq_better_to_idle(bfqq)))
  5878. bfq_bfqq_expire(bfqd, bfqq, false,
  5879. BFQQE_NO_MORE_REQUESTS);
  5880. }
  5881. if (!bfqd->tot_rq_in_driver)
  5882. bfq_schedule_dispatch(bfqd);
  5883. }
  5884. /*
  5885. * The processes associated with bfqq may happen to generate their
  5886. * cumulative I/O at a lower rate than the rate at which the device
  5887. * could serve the same I/O. This is rather probable, e.g., if only
  5888. * one process is associated with bfqq and the device is an SSD. It
  5889. * results in bfqq becoming often empty while in service. In this
  5890. * respect, if BFQ is allowed to switch to another queue when bfqq
  5891. * remains empty, then the device goes on being fed with I/O requests,
  5892. * and the throughput is not affected. In contrast, if BFQ is not
  5893. * allowed to switch to another queue---because bfqq is sync and
  5894. * I/O-dispatch needs to be plugged while bfqq is temporarily
  5895. * empty---then, during the service of bfqq, there will be frequent
  5896. * "service holes", i.e., time intervals during which bfqq gets empty
  5897. * and the device can only consume the I/O already queued in its
  5898. * hardware queues. During service holes, the device may even get to
  5899. * remaining idle. In the end, during the service of bfqq, the device
  5900. * is driven at a lower speed than the one it can reach with the kind
  5901. * of I/O flowing through bfqq.
  5902. *
  5903. * To counter this loss of throughput, BFQ implements a "request
  5904. * injection mechanism", which tries to fill the above service holes
  5905. * with I/O requests taken from other queues. The hard part in this
  5906. * mechanism is finding the right amount of I/O to inject, so as to
  5907. * both boost throughput and not break bfqq's bandwidth and latency
  5908. * guarantees. In this respect, the mechanism maintains a per-queue
  5909. * inject limit, computed as below. While bfqq is empty, the injection
  5910. * mechanism dispatches extra I/O requests only until the total number
  5911. * of I/O requests in flight---i.e., already dispatched but not yet
  5912. * completed---remains lower than this limit.
  5913. *
  5914. * A first definition comes in handy to introduce the algorithm by
  5915. * which the inject limit is computed. We define as first request for
  5916. * bfqq, an I/O request for bfqq that arrives while bfqq is in
  5917. * service, and causes bfqq to switch from empty to non-empty. The
  5918. * algorithm updates the limit as a function of the effect of
  5919. * injection on the service times of only the first requests of
  5920. * bfqq. The reason for this restriction is that these are the
  5921. * requests whose service time is affected most, because they are the
  5922. * first to arrive after injection possibly occurred.
  5923. *
  5924. * To evaluate the effect of injection, the algorithm measures the
  5925. * "total service time" of first requests. We define as total service
  5926. * time of an I/O request, the time that elapses since when the
  5927. * request is enqueued into bfqq, to when it is completed. This
  5928. * quantity allows the whole effect of injection to be measured. It is
  5929. * easy to see why. Suppose that some requests of other queues are
  5930. * actually injected while bfqq is empty, and that a new request R
  5931. * then arrives for bfqq. If the device does start to serve all or
  5932. * part of the injected requests during the service hole, then,
  5933. * because of this extra service, it may delay the next invocation of
  5934. * the dispatch hook of BFQ. Then, even after R gets eventually
  5935. * dispatched, the device may delay the actual service of R if it is
  5936. * still busy serving the extra requests, or if it decides to serve,
  5937. * before R, some extra request still present in its queues. As a
  5938. * conclusion, the cumulative extra delay caused by injection can be
  5939. * easily evaluated by just comparing the total service time of first
  5940. * requests with and without injection.
  5941. *
  5942. * The limit-update algorithm works as follows. On the arrival of a
  5943. * first request of bfqq, the algorithm measures the total time of the
  5944. * request only if one of the three cases below holds, and, for each
  5945. * case, it updates the limit as described below:
  5946. *
  5947. * (1) If there is no in-flight request. This gives a baseline for the
  5948. * total service time of the requests of bfqq. If the baseline has
  5949. * not been computed yet, then, after computing it, the limit is
  5950. * set to 1, to start boosting throughput, and to prepare the
  5951. * ground for the next case. If the baseline has already been
  5952. * computed, then it is updated, in case it results to be lower
  5953. * than the previous value.
  5954. *
  5955. * (2) If the limit is higher than 0 and there are in-flight
  5956. * requests. By comparing the total service time in this case with
  5957. * the above baseline, it is possible to know at which extent the
  5958. * current value of the limit is inflating the total service
  5959. * time. If the inflation is below a certain threshold, then bfqq
  5960. * is assumed to be suffering from no perceivable loss of its
  5961. * service guarantees, and the limit is even tentatively
  5962. * increased. If the inflation is above the threshold, then the
  5963. * limit is decreased. Due to the lack of any hysteresis, this
  5964. * logic makes the limit oscillate even in steady workload
  5965. * conditions. Yet we opted for it, because it is fast in reaching
  5966. * the best value for the limit, as a function of the current I/O
  5967. * workload. To reduce oscillations, this step is disabled for a
  5968. * short time interval after the limit happens to be decreased.
  5969. *
  5970. * (3) Periodically, after resetting the limit, to make sure that the
  5971. * limit eventually drops in case the workload changes. This is
  5972. * needed because, after the limit has gone safely up for a
  5973. * certain workload, it is impossible to guess whether the
  5974. * baseline total service time may have changed, without measuring
  5975. * it again without injection. A more effective version of this
  5976. * step might be to just sample the baseline, by interrupting
  5977. * injection only once, and then to reset/lower the limit only if
  5978. * the total service time with the current limit does happen to be
  5979. * too large.
  5980. *
  5981. * More details on each step are provided in the comments on the
  5982. * pieces of code that implement these steps: the branch handling the
  5983. * transition from empty to non empty in bfq_add_request(), the branch
  5984. * handling injection in bfq_select_queue(), and the function
  5985. * bfq_choose_bfqq_for_injection(). These comments also explain some
  5986. * exceptions, made by the injection mechanism in some special cases.
  5987. */
  5988. static void bfq_update_inject_limit(struct bfq_data *bfqd,
  5989. struct bfq_queue *bfqq)
  5990. {
  5991. u64 tot_time_ns = blk_time_get_ns() - bfqd->last_empty_occupied_ns;
  5992. unsigned int old_limit = bfqq->inject_limit;
  5993. if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
  5994. u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
  5995. if (tot_time_ns >= threshold && old_limit > 0) {
  5996. bfqq->inject_limit--;
  5997. bfqq->decrease_time_jif = jiffies;
  5998. } else if (tot_time_ns < threshold &&
  5999. old_limit <= bfqd->max_rq_in_driver)
  6000. bfqq->inject_limit++;
  6001. }
  6002. /*
  6003. * Either we still have to compute the base value for the
  6004. * total service time, and there seem to be the right
  6005. * conditions to do it, or we can lower the last base value
  6006. * computed.
  6007. *
  6008. * NOTE: (bfqd->tot_rq_in_driver == 1) means that there is no I/O
  6009. * request in flight, because this function is in the code
  6010. * path that handles the completion of a request of bfqq, and,
  6011. * in particular, this function is executed before
  6012. * bfqd->tot_rq_in_driver is decremented in such a code path.
  6013. */
  6014. if ((bfqq->last_serv_time_ns == 0 && bfqd->tot_rq_in_driver == 1) ||
  6015. tot_time_ns < bfqq->last_serv_time_ns) {
  6016. if (bfqq->last_serv_time_ns == 0) {
  6017. /*
  6018. * Now we certainly have a base value: make sure we
  6019. * start trying injection.
  6020. */
  6021. bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
  6022. }
  6023. bfqq->last_serv_time_ns = tot_time_ns;
  6024. } else if (!bfqd->rqs_injected && bfqd->tot_rq_in_driver == 1)
  6025. /*
  6026. * No I/O injected and no request still in service in
  6027. * the drive: these are the exact conditions for
  6028. * computing the base value of the total service time
  6029. * for bfqq. So let's update this value, because it is
  6030. * rather variable. For example, it varies if the size
  6031. * or the spatial locality of the I/O requests in bfqq
  6032. * change.
  6033. */
  6034. bfqq->last_serv_time_ns = tot_time_ns;
  6035. /* update complete, not waiting for any request completion any longer */
  6036. bfqd->waited_rq = NULL;
  6037. bfqd->rqs_injected = false;
  6038. }
  6039. /*
  6040. * Handle either a requeue or a finish for rq. The things to do are
  6041. * the same in both cases: all references to rq are to be dropped. In
  6042. * particular, rq is considered completed from the point of view of
  6043. * the scheduler.
  6044. */
  6045. static void bfq_finish_requeue_request(struct request *rq)
  6046. {
  6047. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  6048. struct bfq_data *bfqd;
  6049. unsigned long flags;
  6050. /*
  6051. * rq either is not associated with any icq, or is an already
  6052. * requeued request that has not (yet) been re-inserted into
  6053. * a bfq_queue.
  6054. */
  6055. if (!rq->elv.icq || !bfqq)
  6056. return;
  6057. bfqd = bfqq->bfqd;
  6058. if (rq->rq_flags & RQF_STARTED)
  6059. bfqg_stats_update_completion(bfqq_group(bfqq),
  6060. rq->start_time_ns,
  6061. rq->io_start_time_ns,
  6062. rq->cmd_flags);
  6063. spin_lock_irqsave(&bfqd->lock, flags);
  6064. if (likely(rq->rq_flags & RQF_STARTED)) {
  6065. if (rq == bfqd->waited_rq)
  6066. bfq_update_inject_limit(bfqd, bfqq);
  6067. bfq_completed_request(bfqq, bfqd);
  6068. }
  6069. bfqq_request_freed(bfqq);
  6070. bfq_put_queue(bfqq);
  6071. RQ_BIC(rq)->requests--;
  6072. spin_unlock_irqrestore(&bfqd->lock, flags);
  6073. /*
  6074. * Reset private fields. In case of a requeue, this allows
  6075. * this function to correctly do nothing if it is spuriously
  6076. * invoked again on this same request (see the check at the
  6077. * beginning of the function). Probably, a better general
  6078. * design would be to prevent blk-mq from invoking the requeue
  6079. * or finish hooks of an elevator, for a request that is not
  6080. * referred by that elevator.
  6081. *
  6082. * Resetting the following fields would break the
  6083. * request-insertion logic if rq is re-inserted into a bfq
  6084. * internal queue, without a re-preparation. Here we assume
  6085. * that re-insertions of requeued requests, without
  6086. * re-preparation, can happen only for pass_through or at_head
  6087. * requests (which are not re-inserted into bfq internal
  6088. * queues).
  6089. */
  6090. rq->elv.priv[0] = NULL;
  6091. rq->elv.priv[1] = NULL;
  6092. }
  6093. static void bfq_finish_request(struct request *rq)
  6094. {
  6095. bfq_finish_requeue_request(rq);
  6096. if (rq->elv.icq) {
  6097. put_io_context(rq->elv.icq->ioc);
  6098. rq->elv.icq = NULL;
  6099. }
  6100. }
  6101. /*
  6102. * Removes the association between the current task and bfqq, assuming
  6103. * that bic points to the bfq iocontext of the task.
  6104. * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
  6105. * was the last process referring to that bfqq.
  6106. */
  6107. static struct bfq_queue *
  6108. bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
  6109. {
  6110. bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
  6111. if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
  6112. bfqq->pid = current->pid;
  6113. bfq_clear_bfqq_coop(bfqq);
  6114. bfq_clear_bfqq_split_coop(bfqq);
  6115. return bfqq;
  6116. }
  6117. bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
  6118. bfq_put_cooperator(bfqq);
  6119. bfq_release_process_ref(bfqq->bfqd, bfqq);
  6120. return NULL;
  6121. }
  6122. static struct bfq_queue *
  6123. __bfq_get_bfqq_handle_split(struct bfq_data *bfqd, struct bfq_io_cq *bic,
  6124. struct bio *bio, bool split, bool is_sync,
  6125. bool *new_queue)
  6126. {
  6127. unsigned int act_idx = bfq_actuator_index(bfqd, bio);
  6128. struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx);
  6129. struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[act_idx];
  6130. if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
  6131. return bfqq;
  6132. if (new_queue)
  6133. *new_queue = true;
  6134. if (bfqq)
  6135. bfq_put_queue(bfqq);
  6136. bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);
  6137. bic_set_bfqq(bic, bfqq, is_sync, act_idx);
  6138. if (split && is_sync) {
  6139. if ((bfqq_data->was_in_burst_list && bfqd->large_burst) ||
  6140. bfqq_data->saved_in_large_burst)
  6141. bfq_mark_bfqq_in_large_burst(bfqq);
  6142. else {
  6143. bfq_clear_bfqq_in_large_burst(bfqq);
  6144. if (bfqq_data->was_in_burst_list)
  6145. /*
  6146. * If bfqq was in the current
  6147. * burst list before being
  6148. * merged, then we have to add
  6149. * it back. And we do not need
  6150. * to increase burst_size, as
  6151. * we did not decrement
  6152. * burst_size when we removed
  6153. * bfqq from the burst list as
  6154. * a consequence of a merge
  6155. * (see comments in
  6156. * bfq_put_queue). In this
  6157. * respect, it would be rather
  6158. * costly to know whether the
  6159. * current burst list is still
  6160. * the same burst list from
  6161. * which bfqq was removed on
  6162. * the merge. To avoid this
  6163. * cost, if bfqq was in a
  6164. * burst list, then we add
  6165. * bfqq to the current burst
  6166. * list without any further
  6167. * check. This can cause
  6168. * inappropriate insertions,
  6169. * but rarely enough to not
  6170. * harm the detection of large
  6171. * bursts significantly.
  6172. */
  6173. hlist_add_head(&bfqq->burst_list_node,
  6174. &bfqd->burst_list);
  6175. }
  6176. bfqq->split_time = jiffies;
  6177. }
  6178. return bfqq;
  6179. }
  6180. /*
  6181. * Only reset private fields. The actual request preparation will be
  6182. * performed by bfq_init_rq, when rq is either inserted or merged. See
  6183. * comments on bfq_init_rq for the reason behind this delayed
  6184. * preparation.
  6185. */
  6186. static void bfq_prepare_request(struct request *rq)
  6187. {
  6188. rq->elv.icq = ioc_find_get_icq(rq->q);
  6189. /*
  6190. * Regardless of whether we have an icq attached, we have to
  6191. * clear the scheduler pointers, as they might point to
  6192. * previously allocated bic/bfqq structs.
  6193. */
  6194. rq->elv.priv[0] = rq->elv.priv[1] = NULL;
  6195. }
  6196. static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
  6197. {
  6198. struct bfq_queue *new_bfqq = bfqq->new_bfqq;
  6199. struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
  6200. if (!waker_bfqq)
  6201. return NULL;
  6202. while (new_bfqq) {
  6203. if (new_bfqq == waker_bfqq) {
  6204. /*
  6205. * If waker_bfqq is in the merge chain, and current
  6206. * is the only process, waker_bfqq can be freed.
  6207. */
  6208. if (bfqq_process_refs(waker_bfqq) == 1)
  6209. return NULL;
  6210. return waker_bfqq;
  6211. }
  6212. new_bfqq = new_bfqq->new_bfqq;
  6213. }
  6214. /*
  6215. * If waker_bfqq is not in the merge chain, and it's procress reference
  6216. * is 0, waker_bfqq can be freed.
  6217. */
  6218. if (bfqq_process_refs(waker_bfqq) == 0)
  6219. return NULL;
  6220. return waker_bfqq;
  6221. }
  6222. static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
  6223. struct bfq_io_cq *bic,
  6224. struct bio *bio,
  6225. unsigned int idx,
  6226. bool is_sync)
  6227. {
  6228. struct bfq_queue *waker_bfqq;
  6229. struct bfq_queue *bfqq;
  6230. bool new_queue = false;
  6231. bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
  6232. &new_queue);
  6233. if (unlikely(new_queue))
  6234. return bfqq;
  6235. /* If the queue was seeky for too long, break it apart. */
  6236. if (!bfq_bfqq_coop(bfqq) || !bfq_bfqq_split_coop(bfqq) ||
  6237. bic->bfqq_data[idx].stably_merged)
  6238. return bfqq;
  6239. waker_bfqq = bfq_waker_bfqq(bfqq);
  6240. /* Update bic before losing reference to bfqq */
  6241. if (bfq_bfqq_in_large_burst(bfqq))
  6242. bic->bfqq_data[idx].saved_in_large_burst = true;
  6243. bfqq = bfq_split_bfqq(bic, bfqq);
  6244. if (bfqq) {
  6245. bfq_bfqq_resume_state(bfqq, bfqd, bic, true);
  6246. return bfqq;
  6247. }
  6248. bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, true, is_sync, NULL);
  6249. if (unlikely(bfqq == &bfqd->oom_bfqq))
  6250. return bfqq;
  6251. bfq_bfqq_resume_state(bfqq, bfqd, bic, false);
  6252. bfqq->waker_bfqq = waker_bfqq;
  6253. bfqq->tentative_waker_bfqq = NULL;
  6254. /*
  6255. * If the waker queue disappears, then new_bfqq->waker_bfqq must be
  6256. * reset. So insert new_bfqq into the
  6257. * woken_list of the waker. See
  6258. * bfq_check_waker for details.
  6259. */
  6260. if (waker_bfqq)
  6261. hlist_add_head(&bfqq->woken_list_node,
  6262. &bfqq->waker_bfqq->woken_list);
  6263. return bfqq;
  6264. }
  6265. /*
  6266. * If needed, init rq, allocate bfq data structures associated with
  6267. * rq, and increment reference counters in the destination bfq_queue
  6268. * for rq. Return the destination bfq_queue for rq, or NULL is rq is
  6269. * not associated with any bfq_queue.
  6270. *
  6271. * This function is invoked by the functions that perform rq insertion
  6272. * or merging. One may have expected the above preparation operations
  6273. * to be performed in bfq_prepare_request, and not delayed to when rq
  6274. * is inserted or merged. The rationale behind this delayed
  6275. * preparation is that, after the prepare_request hook is invoked for
  6276. * rq, rq may still be transformed into a request with no icq, i.e., a
  6277. * request not associated with any queue. No bfq hook is invoked to
  6278. * signal this transformation. As a consequence, should these
  6279. * preparation operations be performed when the prepare_request hook
  6280. * is invoked, and should rq be transformed one moment later, bfq
  6281. * would end up in an inconsistent state, because it would have
  6282. * incremented some queue counters for an rq destined to
  6283. * transformation, without any chance to correctly lower these
  6284. * counters back. In contrast, no transformation can still happen for
  6285. * rq after rq has been inserted or merged. So, it is safe to execute
  6286. * these preparation operations when rq is finally inserted or merged.
  6287. */
  6288. static struct bfq_queue *bfq_init_rq(struct request *rq)
  6289. {
  6290. struct request_queue *q = rq->q;
  6291. struct bio *bio = rq->bio;
  6292. struct bfq_data *bfqd = q->elevator->elevator_data;
  6293. struct bfq_io_cq *bic;
  6294. const int is_sync = rq_is_sync(rq);
  6295. struct bfq_queue *bfqq;
  6296. unsigned int a_idx = bfq_actuator_index(bfqd, bio);
  6297. if (unlikely(!rq->elv.icq))
  6298. return NULL;
  6299. /*
  6300. * Assuming that RQ_BFQQ(rq) is set only if everything is set
  6301. * for this rq. This holds true, because this function is
  6302. * invoked only for insertion or merging, and, after such
  6303. * events, a request cannot be manipulated any longer before
  6304. * being removed from bfq.
  6305. */
  6306. if (RQ_BFQQ(rq))
  6307. return RQ_BFQQ(rq);
  6308. bic = icq_to_bic(rq->elv.icq);
  6309. bfq_check_ioprio_change(bic, bio);
  6310. bfq_bic_update_cgroup(bic, bio);
  6311. bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, a_idx, is_sync);
  6312. bfqq_request_allocated(bfqq);
  6313. bfqq->ref++;
  6314. bic->requests++;
  6315. bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
  6316. rq, bfqq, bfqq->ref);
  6317. rq->elv.priv[0] = bic;
  6318. rq->elv.priv[1] = bfqq;
  6319. /*
  6320. * If a bfq_queue has only one process reference, it is owned
  6321. * by only this bic: we can then set bfqq->bic = bic. in
  6322. * addition, if the queue has also just been split, we have to
  6323. * resume its state.
  6324. */
  6325. if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
  6326. bfqq_process_refs(bfqq) == 1)
  6327. bfqq->bic = bic;
  6328. /*
  6329. * Consider bfqq as possibly belonging to a burst of newly
  6330. * created queues only if:
  6331. * 1) A burst is actually happening (bfqd->burst_size > 0)
  6332. * or
  6333. * 2) There is no other active queue. In fact, if, in
  6334. * contrast, there are active queues not belonging to the
  6335. * possible burst bfqq may belong to, then there is no gain
  6336. * in considering bfqq as belonging to a burst, and
  6337. * therefore in not weight-raising bfqq. See comments on
  6338. * bfq_handle_burst().
  6339. *
  6340. * This filtering also helps eliminating false positives,
  6341. * occurring when bfqq does not belong to an actual large
  6342. * burst, but some background task (e.g., a service) happens
  6343. * to trigger the creation of new queues very close to when
  6344. * bfqq and its possible companion queues are created. See
  6345. * comments on bfq_handle_burst() for further details also on
  6346. * this issue.
  6347. */
  6348. if (unlikely(bfq_bfqq_just_created(bfqq) &&
  6349. (bfqd->burst_size > 0 ||
  6350. bfq_tot_busy_queues(bfqd) == 0)))
  6351. bfq_handle_burst(bfqd, bfqq);
  6352. return bfqq;
  6353. }
  6354. static void
  6355. bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  6356. {
  6357. enum bfqq_expiration reason;
  6358. unsigned long flags;
  6359. spin_lock_irqsave(&bfqd->lock, flags);
  6360. /*
  6361. * Considering that bfqq may be in race, we should firstly check
  6362. * whether bfqq is in service before doing something on it. If
  6363. * the bfqq in race is not in service, it has already been expired
  6364. * through __bfq_bfqq_expire func and its wait_request flags has
  6365. * been cleared in __bfq_bfqd_reset_in_service func.
  6366. */
  6367. if (bfqq != bfqd->in_service_queue) {
  6368. spin_unlock_irqrestore(&bfqd->lock, flags);
  6369. return;
  6370. }
  6371. bfq_clear_bfqq_wait_request(bfqq);
  6372. if (bfq_bfqq_budget_timeout(bfqq))
  6373. /*
  6374. * Also here the queue can be safely expired
  6375. * for budget timeout without wasting
  6376. * guarantees
  6377. */
  6378. reason = BFQQE_BUDGET_TIMEOUT;
  6379. else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
  6380. /*
  6381. * The queue may not be empty upon timer expiration,
  6382. * because we may not disable the timer when the
  6383. * first request of the in-service queue arrives
  6384. * during disk idling.
  6385. */
  6386. reason = BFQQE_TOO_IDLE;
  6387. else
  6388. goto schedule_dispatch;
  6389. bfq_bfqq_expire(bfqd, bfqq, true, reason);
  6390. schedule_dispatch:
  6391. bfq_schedule_dispatch(bfqd);
  6392. spin_unlock_irqrestore(&bfqd->lock, flags);
  6393. }
  6394. /*
  6395. * Handler of the expiration of the timer running if the in-service queue
  6396. * is idling inside its time slice.
  6397. */
  6398. static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
  6399. {
  6400. struct bfq_data *bfqd = container_of(timer, struct bfq_data,
  6401. idle_slice_timer);
  6402. struct bfq_queue *bfqq = bfqd->in_service_queue;
  6403. /*
  6404. * Theoretical race here: the in-service queue can be NULL or
  6405. * different from the queue that was idling if a new request
  6406. * arrives for the current queue and there is a full dispatch
  6407. * cycle that changes the in-service queue. This can hardly
  6408. * happen, but in the worst case we just expire a queue too
  6409. * early.
  6410. */
  6411. if (bfqq)
  6412. bfq_idle_slice_timer_body(bfqd, bfqq);
  6413. return HRTIMER_NORESTART;
  6414. }
  6415. static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
  6416. struct bfq_queue **bfqq_ptr)
  6417. {
  6418. struct bfq_queue *bfqq = *bfqq_ptr;
  6419. bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
  6420. if (bfqq) {
  6421. bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
  6422. bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
  6423. bfqq, bfqq->ref);
  6424. bfq_put_queue(bfqq);
  6425. *bfqq_ptr = NULL;
  6426. }
  6427. }
  6428. /*
  6429. * Release all the bfqg references to its async queues. If we are
  6430. * deallocating the group these queues may still contain requests, so
  6431. * we reparent them to the root cgroup (i.e., the only one that will
  6432. * exist for sure until all the requests on a device are gone).
  6433. */
  6434. void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
  6435. {
  6436. int i, j, k;
  6437. for (k = 0; k < bfqd->num_actuators; k++) {
  6438. for (i = 0; i < 2; i++)
  6439. for (j = 0; j < IOPRIO_NR_LEVELS; j++)
  6440. __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j][k]);
  6441. __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq[k]);
  6442. }
  6443. }
  6444. /*
  6445. * See the comments on bfq_limit_depth for the purpose of
  6446. * the depths set in the function. Return minimum shallow depth we'll use.
  6447. */
  6448. static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
  6449. {
  6450. unsigned int depth = 1U << bt->sb.shift;
  6451. bfqd->full_depth_shift = bt->sb.shift;
  6452. /*
  6453. * In-word depths if no bfq_queue is being weight-raised:
  6454. * leaving 25% of tags only for sync reads.
  6455. *
  6456. * In next formulas, right-shift the value
  6457. * (1U<<bt->sb.shift), instead of computing directly
  6458. * (1U<<(bt->sb.shift - something)), to be robust against
  6459. * any possible value of bt->sb.shift, without having to
  6460. * limit 'something'.
  6461. */
  6462. /* no more than 50% of tags for async I/O */
  6463. bfqd->word_depths[0][0] = max(depth >> 1, 1U);
  6464. /*
  6465. * no more than 75% of tags for sync writes (25% extra tags
  6466. * w.r.t. async I/O, to prevent async I/O from starving sync
  6467. * writes)
  6468. */
  6469. bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
  6470. /*
  6471. * In-word depths in case some bfq_queue is being weight-
  6472. * raised: leaving ~63% of tags for sync reads. This is the
  6473. * highest percentage for which, in our tests, application
  6474. * start-up times didn't suffer from any regression due to tag
  6475. * shortage.
  6476. */
  6477. /* no more than ~18% of tags for async I/O */
  6478. bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
  6479. /* no more than ~37% of tags for sync writes (~20% extra tags) */
  6480. bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
  6481. }
  6482. static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
  6483. {
  6484. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  6485. struct blk_mq_tags *tags = hctx->sched_tags;
  6486. bfq_update_depths(bfqd, &tags->bitmap_tags);
  6487. sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
  6488. }
  6489. static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
  6490. {
  6491. bfq_depth_updated(hctx);
  6492. return 0;
  6493. }
  6494. static void bfq_exit_queue(struct elevator_queue *e)
  6495. {
  6496. struct bfq_data *bfqd = e->elevator_data;
  6497. struct bfq_queue *bfqq, *n;
  6498. unsigned int actuator;
  6499. hrtimer_cancel(&bfqd->idle_slice_timer);
  6500. spin_lock_irq(&bfqd->lock);
  6501. list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
  6502. bfq_deactivate_bfqq(bfqd, bfqq, false, false);
  6503. spin_unlock_irq(&bfqd->lock);
  6504. for (actuator = 0; actuator < bfqd->num_actuators; actuator++)
  6505. WARN_ON_ONCE(bfqd->rq_in_driver[actuator]);
  6506. WARN_ON_ONCE(bfqd->tot_rq_in_driver);
  6507. hrtimer_cancel(&bfqd->idle_slice_timer);
  6508. /* release oom-queue reference to root group */
  6509. bfqg_and_blkg_put(bfqd->root_group);
  6510. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6511. blkcg_deactivate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
  6512. #else
  6513. spin_lock_irq(&bfqd->lock);
  6514. bfq_put_async_queues(bfqd, bfqd->root_group);
  6515. kfree(bfqd->root_group);
  6516. spin_unlock_irq(&bfqd->lock);
  6517. #endif
  6518. blk_stat_disable_accounting(bfqd->queue);
  6519. clear_bit(ELEVATOR_FLAG_DISABLE_WBT, &e->flags);
  6520. wbt_enable_default(bfqd->queue->disk);
  6521. kfree(bfqd);
  6522. }
  6523. static void bfq_init_root_group(struct bfq_group *root_group,
  6524. struct bfq_data *bfqd)
  6525. {
  6526. int i;
  6527. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6528. root_group->entity.parent = NULL;
  6529. root_group->my_entity = NULL;
  6530. root_group->bfqd = bfqd;
  6531. #endif
  6532. root_group->rq_pos_tree = RB_ROOT;
  6533. for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
  6534. root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
  6535. root_group->sched_data.bfq_class_idle_last_service = jiffies;
  6536. }
  6537. static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
  6538. {
  6539. struct bfq_data *bfqd;
  6540. struct elevator_queue *eq;
  6541. unsigned int i;
  6542. struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;
  6543. eq = elevator_alloc(q, e);
  6544. if (!eq)
  6545. return -ENOMEM;
  6546. bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
  6547. if (!bfqd) {
  6548. kobject_put(&eq->kobj);
  6549. return -ENOMEM;
  6550. }
  6551. eq->elevator_data = bfqd;
  6552. spin_lock_irq(&q->queue_lock);
  6553. q->elevator = eq;
  6554. spin_unlock_irq(&q->queue_lock);
  6555. /*
  6556. * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
  6557. * Grab a permanent reference to it, so that the normal code flow
  6558. * will not attempt to free it.
  6559. * Set zero as actuator index: we will pretend that
  6560. * all I/O requests are for the same actuator.
  6561. */
  6562. bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0, 0);
  6563. bfqd->oom_bfqq.ref++;
  6564. bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
  6565. bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
  6566. bfqd->oom_bfqq.entity.new_weight =
  6567. bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
  6568. /* oom_bfqq does not participate to bursts */
  6569. bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
  6570. /*
  6571. * Trigger weight initialization, according to ioprio, at the
  6572. * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
  6573. * class won't be changed any more.
  6574. */
  6575. bfqd->oom_bfqq.entity.prio_changed = 1;
  6576. bfqd->queue = q;
  6577. bfqd->num_actuators = 1;
  6578. /*
  6579. * If the disk supports multiple actuators, copy independent
  6580. * access ranges from the request queue structure.
  6581. */
  6582. spin_lock_irq(&q->queue_lock);
  6583. if (ia_ranges) {
  6584. /*
  6585. * Check if the disk ia_ranges size exceeds the current bfq
  6586. * actuator limit.
  6587. */
  6588. if (ia_ranges->nr_ia_ranges > BFQ_MAX_ACTUATORS) {
  6589. pr_crit("nr_ia_ranges higher than act limit: iars=%d, max=%d.\n",
  6590. ia_ranges->nr_ia_ranges, BFQ_MAX_ACTUATORS);
  6591. pr_crit("Falling back to single actuator mode.\n");
  6592. } else {
  6593. bfqd->num_actuators = ia_ranges->nr_ia_ranges;
  6594. for (i = 0; i < bfqd->num_actuators; i++) {
  6595. bfqd->sector[i] = ia_ranges->ia_range[i].sector;
  6596. bfqd->nr_sectors[i] =
  6597. ia_ranges->ia_range[i].nr_sectors;
  6598. }
  6599. }
  6600. }
  6601. /* Otherwise use single-actuator dev info */
  6602. if (bfqd->num_actuators == 1) {
  6603. bfqd->sector[0] = 0;
  6604. bfqd->nr_sectors[0] = get_capacity(q->disk);
  6605. }
  6606. spin_unlock_irq(&q->queue_lock);
  6607. INIT_LIST_HEAD(&bfqd->dispatch);
  6608. hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
  6609. HRTIMER_MODE_REL);
  6610. bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
  6611. bfqd->queue_weights_tree = RB_ROOT_CACHED;
  6612. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6613. bfqd->num_groups_with_pending_reqs = 0;
  6614. #endif
  6615. INIT_LIST_HEAD(&bfqd->active_list[0]);
  6616. INIT_LIST_HEAD(&bfqd->active_list[1]);
  6617. INIT_LIST_HEAD(&bfqd->idle_list);
  6618. INIT_HLIST_HEAD(&bfqd->burst_list);
  6619. bfqd->hw_tag = -1;
  6620. bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
  6621. bfqd->bfq_max_budget = bfq_default_max_budget;
  6622. bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
  6623. bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
  6624. bfqd->bfq_back_max = bfq_back_max;
  6625. bfqd->bfq_back_penalty = bfq_back_penalty;
  6626. bfqd->bfq_slice_idle = bfq_slice_idle;
  6627. bfqd->bfq_timeout = bfq_timeout;
  6628. bfqd->bfq_large_burst_thresh = 8;
  6629. bfqd->bfq_burst_interval = msecs_to_jiffies(180);
  6630. bfqd->low_latency = true;
  6631. /*
  6632. * Trade-off between responsiveness and fairness.
  6633. */
  6634. bfqd->bfq_wr_coeff = 30;
  6635. bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
  6636. bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
  6637. bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
  6638. bfqd->bfq_wr_max_softrt_rate = 7000; /*
  6639. * Approximate rate required
  6640. * to playback or record a
  6641. * high-definition compressed
  6642. * video.
  6643. */
  6644. bfqd->wr_busy_queues = 0;
  6645. /*
  6646. * Begin by assuming, optimistically, that the device peak
  6647. * rate is equal to 2/3 of the highest reference rate.
  6648. */
  6649. bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
  6650. ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
  6651. bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
  6652. /* see comments on the definition of next field inside bfq_data */
  6653. bfqd->actuator_load_threshold = 4;
  6654. spin_lock_init(&bfqd->lock);
  6655. /*
  6656. * The invocation of the next bfq_create_group_hierarchy
  6657. * function is the head of a chain of function calls
  6658. * (bfq_create_group_hierarchy->blkcg_activate_policy->
  6659. * blk_mq_freeze_queue) that may lead to the invocation of the
  6660. * has_work hook function. For this reason,
  6661. * bfq_create_group_hierarchy is invoked only after all
  6662. * scheduler data has been initialized, apart from the fields
  6663. * that can be initialized only after invoking
  6664. * bfq_create_group_hierarchy. This, in particular, enables
  6665. * has_work to correctly return false. Of course, to avoid
  6666. * other inconsistencies, the blk-mq stack must then refrain
  6667. * from invoking further scheduler hooks before this init
  6668. * function is finished.
  6669. */
  6670. bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
  6671. if (!bfqd->root_group)
  6672. goto out_free;
  6673. bfq_init_root_group(bfqd->root_group, bfqd);
  6674. bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
  6675. /* We dispatch from request queue wide instead of hw queue */
  6676. blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
  6677. set_bit(ELEVATOR_FLAG_DISABLE_WBT, &eq->flags);
  6678. wbt_disable_default(q->disk);
  6679. blk_stat_enable_accounting(q);
  6680. return 0;
  6681. out_free:
  6682. kfree(bfqd);
  6683. kobject_put(&eq->kobj);
  6684. return -ENOMEM;
  6685. }
  6686. static void bfq_slab_kill(void)
  6687. {
  6688. kmem_cache_destroy(bfq_pool);
  6689. }
  6690. static int __init bfq_slab_setup(void)
  6691. {
  6692. bfq_pool = KMEM_CACHE(bfq_queue, 0);
  6693. if (!bfq_pool)
  6694. return -ENOMEM;
  6695. return 0;
  6696. }
  6697. static ssize_t bfq_var_show(unsigned int var, char *page)
  6698. {
  6699. return sprintf(page, "%u\n", var);
  6700. }
  6701. static int bfq_var_store(unsigned long *var, const char *page)
  6702. {
  6703. unsigned long new_val;
  6704. int ret = kstrtoul(page, 10, &new_val);
  6705. if (ret)
  6706. return ret;
  6707. *var = new_val;
  6708. return 0;
  6709. }
  6710. #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
  6711. static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  6712. { \
  6713. struct bfq_data *bfqd = e->elevator_data; \
  6714. u64 __data = __VAR; \
  6715. if (__CONV == 1) \
  6716. __data = jiffies_to_msecs(__data); \
  6717. else if (__CONV == 2) \
  6718. __data = div_u64(__data, NSEC_PER_MSEC); \
  6719. return bfq_var_show(__data, (page)); \
  6720. }
  6721. SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
  6722. SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
  6723. SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
  6724. SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
  6725. SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
  6726. SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
  6727. SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
  6728. SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
  6729. SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
  6730. #undef SHOW_FUNCTION
  6731. #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
  6732. static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  6733. { \
  6734. struct bfq_data *bfqd = e->elevator_data; \
  6735. u64 __data = __VAR; \
  6736. __data = div_u64(__data, NSEC_PER_USEC); \
  6737. return bfq_var_show(__data, (page)); \
  6738. }
  6739. USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
  6740. #undef USEC_SHOW_FUNCTION
  6741. #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
  6742. static ssize_t \
  6743. __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  6744. { \
  6745. struct bfq_data *bfqd = e->elevator_data; \
  6746. unsigned long __data, __min = (MIN), __max = (MAX); \
  6747. int ret; \
  6748. \
  6749. ret = bfq_var_store(&__data, (page)); \
  6750. if (ret) \
  6751. return ret; \
  6752. if (__data < __min) \
  6753. __data = __min; \
  6754. else if (__data > __max) \
  6755. __data = __max; \
  6756. if (__CONV == 1) \
  6757. *(__PTR) = msecs_to_jiffies(__data); \
  6758. else if (__CONV == 2) \
  6759. *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
  6760. else \
  6761. *(__PTR) = __data; \
  6762. return count; \
  6763. }
  6764. STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
  6765. INT_MAX, 2);
  6766. STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
  6767. INT_MAX, 2);
  6768. STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
  6769. STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
  6770. INT_MAX, 0);
  6771. STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
  6772. #undef STORE_FUNCTION
  6773. #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
  6774. static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
  6775. { \
  6776. struct bfq_data *bfqd = e->elevator_data; \
  6777. unsigned long __data, __min = (MIN), __max = (MAX); \
  6778. int ret; \
  6779. \
  6780. ret = bfq_var_store(&__data, (page)); \
  6781. if (ret) \
  6782. return ret; \
  6783. if (__data < __min) \
  6784. __data = __min; \
  6785. else if (__data > __max) \
  6786. __data = __max; \
  6787. *(__PTR) = (u64)__data * NSEC_PER_USEC; \
  6788. return count; \
  6789. }
  6790. USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
  6791. UINT_MAX);
  6792. #undef USEC_STORE_FUNCTION
  6793. static ssize_t bfq_max_budget_store(struct elevator_queue *e,
  6794. const char *page, size_t count)
  6795. {
  6796. struct bfq_data *bfqd = e->elevator_data;
  6797. unsigned long __data;
  6798. int ret;
  6799. ret = bfq_var_store(&__data, (page));
  6800. if (ret)
  6801. return ret;
  6802. if (__data == 0)
  6803. bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
  6804. else {
  6805. if (__data > INT_MAX)
  6806. __data = INT_MAX;
  6807. bfqd->bfq_max_budget = __data;
  6808. }
  6809. bfqd->bfq_user_max_budget = __data;
  6810. return count;
  6811. }
  6812. /*
  6813. * Leaving this name to preserve name compatibility with cfq
  6814. * parameters, but this timeout is used for both sync and async.
  6815. */
  6816. static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
  6817. const char *page, size_t count)
  6818. {
  6819. struct bfq_data *bfqd = e->elevator_data;
  6820. unsigned long __data;
  6821. int ret;
  6822. ret = bfq_var_store(&__data, (page));
  6823. if (ret)
  6824. return ret;
  6825. if (__data < 1)
  6826. __data = 1;
  6827. else if (__data > INT_MAX)
  6828. __data = INT_MAX;
  6829. bfqd->bfq_timeout = msecs_to_jiffies(__data);
  6830. if (bfqd->bfq_user_max_budget == 0)
  6831. bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
  6832. return count;
  6833. }
  6834. static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
  6835. const char *page, size_t count)
  6836. {
  6837. struct bfq_data *bfqd = e->elevator_data;
  6838. unsigned long __data;
  6839. int ret;
  6840. ret = bfq_var_store(&__data, (page));
  6841. if (ret)
  6842. return ret;
  6843. if (__data > 1)
  6844. __data = 1;
  6845. if (!bfqd->strict_guarantees && __data == 1
  6846. && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
  6847. bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
  6848. bfqd->strict_guarantees = __data;
  6849. return count;
  6850. }
  6851. static ssize_t bfq_low_latency_store(struct elevator_queue *e,
  6852. const char *page, size_t count)
  6853. {
  6854. struct bfq_data *bfqd = e->elevator_data;
  6855. unsigned long __data;
  6856. int ret;
  6857. ret = bfq_var_store(&__data, (page));
  6858. if (ret)
  6859. return ret;
  6860. if (__data > 1)
  6861. __data = 1;
  6862. if (__data == 0 && bfqd->low_latency != 0)
  6863. bfq_end_wr(bfqd);
  6864. bfqd->low_latency = __data;
  6865. return count;
  6866. }
  6867. #define BFQ_ATTR(name) \
  6868. __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
  6869. static struct elv_fs_entry bfq_attrs[] = {
  6870. BFQ_ATTR(fifo_expire_sync),
  6871. BFQ_ATTR(fifo_expire_async),
  6872. BFQ_ATTR(back_seek_max),
  6873. BFQ_ATTR(back_seek_penalty),
  6874. BFQ_ATTR(slice_idle),
  6875. BFQ_ATTR(slice_idle_us),
  6876. BFQ_ATTR(max_budget),
  6877. BFQ_ATTR(timeout_sync),
  6878. BFQ_ATTR(strict_guarantees),
  6879. BFQ_ATTR(low_latency),
  6880. __ATTR_NULL
  6881. };
  6882. static struct elevator_type iosched_bfq_mq = {
  6883. .ops = {
  6884. .limit_depth = bfq_limit_depth,
  6885. .prepare_request = bfq_prepare_request,
  6886. .requeue_request = bfq_finish_requeue_request,
  6887. .finish_request = bfq_finish_request,
  6888. .exit_icq = bfq_exit_icq,
  6889. .insert_requests = bfq_insert_requests,
  6890. .dispatch_request = bfq_dispatch_request,
  6891. .next_request = elv_rb_latter_request,
  6892. .former_request = elv_rb_former_request,
  6893. .allow_merge = bfq_allow_bio_merge,
  6894. .bio_merge = bfq_bio_merge,
  6895. .request_merge = bfq_request_merge,
  6896. .requests_merged = bfq_requests_merged,
  6897. .request_merged = bfq_request_merged,
  6898. .has_work = bfq_has_work,
  6899. .depth_updated = bfq_depth_updated,
  6900. .init_hctx = bfq_init_hctx,
  6901. .init_sched = bfq_init_queue,
  6902. .exit_sched = bfq_exit_queue,
  6903. },
  6904. .icq_size = sizeof(struct bfq_io_cq),
  6905. .icq_align = __alignof__(struct bfq_io_cq),
  6906. .elevator_attrs = bfq_attrs,
  6907. .elevator_name = "bfq",
  6908. .elevator_owner = THIS_MODULE,
  6909. };
  6910. MODULE_ALIAS("bfq-iosched");
  6911. static int __init bfq_init(void)
  6912. {
  6913. int ret;
  6914. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6915. ret = blkcg_policy_register(&blkcg_policy_bfq);
  6916. if (ret)
  6917. return ret;
  6918. #endif
  6919. ret = -ENOMEM;
  6920. if (bfq_slab_setup())
  6921. goto err_pol_unreg;
  6922. /*
  6923. * Times to load large popular applications for the typical
  6924. * systems installed on the reference devices (see the
  6925. * comments before the definition of the next
  6926. * array). Actually, we use slightly lower values, as the
  6927. * estimated peak rate tends to be smaller than the actual
  6928. * peak rate. The reason for this last fact is that estimates
  6929. * are computed over much shorter time intervals than the long
  6930. * intervals typically used for benchmarking. Why? First, to
  6931. * adapt more quickly to variations. Second, because an I/O
  6932. * scheduler cannot rely on a peak-rate-evaluation workload to
  6933. * be run for a long time.
  6934. */
  6935. ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */
  6936. ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */
  6937. ret = elv_register(&iosched_bfq_mq);
  6938. if (ret)
  6939. goto slab_kill;
  6940. return 0;
  6941. slab_kill:
  6942. bfq_slab_kill();
  6943. err_pol_unreg:
  6944. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6945. blkcg_policy_unregister(&blkcg_policy_bfq);
  6946. #endif
  6947. return ret;
  6948. }
  6949. static void __exit bfq_exit(void)
  6950. {
  6951. elv_unregister(&iosched_bfq_mq);
  6952. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6953. blkcg_policy_unregister(&blkcg_policy_bfq);
  6954. #endif
  6955. bfq_slab_kill();
  6956. }
  6957. module_init(bfq_init);
  6958. module_exit(bfq_exit);
  6959. MODULE_AUTHOR("Paolo Valente");
  6960. MODULE_LICENSE("GPL");
  6961. MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");