mpi3mr_os.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/idr.h>

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, "Host protection guard mask, def=3");

static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
        "bits for enabling additional logging info (default=0)");

static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
        "Preferred max number of SG entries to be used for a single I/O\n"
        "The actual value will be determined by the driver\n"
        "(Minimum=256, Maximum=2048, default=256)");
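
/*
 * Usage note (illustrative, not part of the driver source): these module
 * parameters are normally supplied at load time, e.g.
 *
 *   modprobe mpi3mr logging_level=0x1 max_sgl_entries=1024
 *
 * Per the description above, max_sgl_entries is only a preference; the
 * driver determines the actual value within the stated 256..2048 range.
 */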
/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
        struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION     (0xFFFF)
#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH  (0xFFFE)

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
        struct scsi_cmnd *scmd)
{
        struct scmd_priv *priv = NULL;
        u32 unique_tag;
        u16 host_tag, hw_queue;

        unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

        hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
        if (hw_queue >= mrioc->num_op_reply_q)
                return MPI3MR_HOSTTAG_INVALID;
        host_tag = blk_mq_unique_tag_to_tag(unique_tag);
        if (WARN_ON(host_tag >= mrioc->max_host_ios))
                return MPI3MR_HOSTTAG_INVALID;

        priv = scsi_cmd_priv(scmd);
        /* host_tag 0 is invalid hence incrementing by 1 */
        priv->host_tag = host_tag + 1;
        priv->scmd = scmd;
        priv->in_lld_scope = 1;
        priv->req_q_idx = hw_queue;
        priv->meta_chain_idx = -1;
        priv->chain_idx = -1;
        priv->meta_sg_valid = 0;
        return priv->host_tag;
}
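
/*
 * Illustrative sketch (assumed values, not driver code): blk_mq_unique_tag()
 * packs the hardware queue index above BLK_MQ_UNIQUE_TAG_BITS and the
 * per-queue tag in the low bits, so for hw_queue = 2 and block tag = 5:
 *
 *   u32 unique_tag = (2 << BLK_MQ_UNIQUE_TAG_BITS) | 5;
 *   blk_mq_unique_tag_to_hwq(unique_tag);   // -> 2
 *   blk_mq_unique_tag_to_tag(unique_tag);   // -> 5
 *
 * mpi3mr_host_tag_for_scmd() then reports host_tag = 5 + 1 = 6, and
 * mpi3mr_scmd_from_host_tag() below reverses the +1 offset.
 */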
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve the associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
        struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
        struct scsi_cmnd *scmd = NULL;
        struct scmd_priv *priv = NULL;
        u32 unique_tag = host_tag - 1;

        if (WARN_ON(host_tag > mrioc->max_host_ios))
                goto out;

        unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

        scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        scmd = NULL;
        }
out:
        return scmd;
}
/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * as not in LLD scope anymore.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
        struct scsi_cmnd *scmd)
{
        struct scmd_priv *priv = NULL;

        priv = scsi_cmd_priv(scmd);

        if (WARN_ON(priv->in_lld_scope == 0))
                return;
        priv->host_tag = MPI3MR_HOSTTAG_INVALID;
        priv->req_q_idx = 0xFFFF;
        priv->scmd = NULL;
        priv->in_lld_scope = 0;
        priv->meta_sg_valid = 0;
        if (priv->chain_idx >= 0) {
                clear_bit(priv->chain_idx, mrioc->chain_bitmap);
                priv->chain_idx = -1;
        }
        if (priv->meta_chain_idx >= 0) {
                clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
                priv->meta_chain_idx = -1;
        }
}
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
        struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when there is no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
        kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - kref incrementor
 * @fwevt: Firmware event reference
 *
 * Increment the firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
        kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement the firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
        kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}
/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
        struct mpi3mr_fwevt *fwevt;

        fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
        if (!fwevt)
                return NULL;

        kref_init(&fwevt->ref_count);
        return fwevt;
}
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        unsigned long flags;

        if (!mrioc->fwevt_worker_thread)
                return;

        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
        /* get fwevt reference count while adding it to fwevt_list */
        mpi3mr_fwevt_get(fwevt);
        INIT_LIST_HEAD(&fwevt->list);
        list_add_tail(&fwevt->list, &mrioc->fwevt_list);
        INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
        /* get fwevt reference count while enqueueing it to worker queue */
        mpi3mr_fwevt_get(fwevt);
        queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}
/**
 * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
 * the list
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * Add the given hdb trigger data event to the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
        struct trigger_event_data *event_data)
{
        struct mpi3mr_fwevt *fwevt;
        u16 sz = sizeof(*event_data);

        fwevt = mpi3mr_alloc_fwevt(sz);
        if (!fwevt) {
                ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
                return;
        }

        fwevt->mrioc = mrioc;
        fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
        fwevt->send_ack = 0;
        fwevt->process_evt = 1;
        fwevt->evt_ctx = 0;
        fwevt->event_data_size = sz;
        memcpy(fwevt->event_data, event_data, sz);

        mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        unsigned long flags;

        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
        if (!list_empty(&fwevt->list)) {
                list_del_init(&fwevt->list);
                /*
                 * Put fwevt reference count after
                 * removing it from fwevt_list
                 */
                mpi3mr_fwevt_put(fwevt);
        }
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
        struct mpi3mr_ioc *mrioc)
{
        unsigned long flags;
        struct mpi3mr_fwevt *fwevt = NULL;

        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
        if (!list_empty(&mrioc->fwevt_list)) {
                fwevt = list_first_entry(&mrioc->fwevt_list,
                    struct mpi3mr_fwevt, list);
                list_del_init(&fwevt->list);
                /*
                 * Put fwevt reference count after
                 * removing it from fwevt_list
                 */
                mpi3mr_fwevt_put(fwevt);
        }
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

        return fwevt;
}
/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
        /*
         * Wait on the fwevt to complete. If this returns 1, then
         * the event was never executed.
         *
         * If it did execute, we wait for it to finish, and the put will
         * happen from mpi3mr_process_fwevt()
         */
        if (cancel_work_sync(&fwevt->work)) {
                /*
                 * Put fwevt reference count after
                 * dequeuing it from worker queue
                 */
                mpi3mr_fwevt_put(fwevt);
                /*
                 * Put fwevt reference count to neutralize
                 * kref_init increment
                 */
                mpi3mr_fwevt_put(fwevt);
        }
}
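
/*
 * Reference-count lifecycle of a firmware event, summarizing the helpers
 * above (counts are illustrative):
 *
 *   fwevt = mpi3mr_alloc_fwevt(len);         // kref_init()        -> 1
 *   mpi3mr_fwevt_add_to_list(mrioc, fwevt);  // +1 list, +1 work   -> 3
 *   ...
 *   fwevt = mpi3mr_dequeue_fwevt(mrioc);     // -1 off the list    -> 2
 *   mpi3mr_cancel_work(fwevt);               // -1 work, -1 init   -> 0, freed
 *
 * If the work runs instead of being cancelled, the matching puts are
 * issued from the worker path (see the mpi3mr_process_fwevt() note in
 * mpi3mr_cancel_work() above).
 */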
/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
        struct mpi3mr_fwevt *fwevt = NULL;

        if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
            !mrioc->fwevt_worker_thread)
                return;

        while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
                mpi3mr_cancel_work(fwevt);

        if (mrioc->current_event) {
                fwevt = mrioc->current_event;
                /*
                 * Don't call cancel_work_sync() API for the
                 * fwevt work if the controller reset is
                 * being processed as part of the same fwevt
                 * work (or) when the worker thread is
                 * waiting for device add/remove APIs to complete.
                 * Otherwise we will see a deadlock.
                 */
                if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
                        fwevt->discard = 1;
                        return;
                }

                mpi3mr_cancel_work(fwevt);
        }
}
/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_throttle_group_info *tg)
{
        struct mpi3mr_fwevt *fwevt;
        u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

        /*
         * If the QD reduction event is already queued due to throttle and if
         * the QD is not restored through device info change event
         * then don't queue further reduction events
         */
        if (tg->fw_qd != tg->modified_qd)
                return;

        fwevt = mpi3mr_alloc_fwevt(sz);
        if (!fwevt) {
                ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
                return;
        }
        *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
        fwevt->mrioc = mrioc;
        fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
        fwevt->send_ack = 0;
        fwevt->process_evt = 1;
        fwevt->evt_ctx = 0;
        fwevt->event_data_size = sz;
        tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

        dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
            tg->id);
        mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
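
/*
 * Worked example for the reduction above (assumed values): with
 * tg->fw_qd = 128 and tg->qd_reduction = 3,
 *
 *   modified_qd = max((128 * 3) / 10, 8) = max(38, 8) = 38
 *
 * i.e. the throttle group QD drops to qd_reduction tenths of the
 * firmware-reported depth, with a floor of 8.
 */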
/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset, prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
        struct mpi3mr_tgt_dev *tgtdev;
        struct mpi3mr_stgt_priv_data *tgt_priv;

        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
                tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
                if (tgtdev->starget && tgtdev->starget->hostdata) {
                        tgt_priv = tgtdev->starget->hostdata;
                        tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
                        tgt_priv->io_throttle_enabled = 0;
                        tgt_priv->io_divert = 0;
                        tgt_priv->throttle_group = NULL;
                        tgt_priv->wslen = 0;
                        if (tgtdev->host_exposed)
                                atomic_set(&tgt_priv->block_io, 1);
                }
        }
}
/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
        struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct scmd_priv *priv = NULL;

        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        goto out;

                ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
                    __func__, priv->host_tag, priv->req_q_idx + 1);
                scsi_print_command(scmd);
        }

out:
        return true;
}
/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
        struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct scmd_priv *priv = NULL;

        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        goto out;

                if (priv->meta_sg_valid)
                        dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
                            scsi_prot_sg_count(scmd), scmd->sc_data_direction);
                mpi3mr_clear_scmd_priv(mrioc, scmd);
                scsi_dma_unmap(scmd);
                scmd->result = DID_RESET << 16;
                scsi_print_command(scmd);
                scsi_done(scmd);
                mrioc->flush_io_count++;
        }

out:
        return true;
}
/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device(lun) then device specific pending I/O counter
 * is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
        struct scsi_device *sdev = (struct scsi_device *)data;
        struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct scmd_priv *priv;

        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        goto out;
                if (scmd->device == sdev)
                        sdev_priv_data->pend_count++;
        }

out:
        return true;
}
/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then target specific pending I/O counter is
 * updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
        struct scsi_target *starget = (struct scsi_target *)data;
        struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct scmd_priv *priv;

        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        goto out;
                if (scmd->device && (scsi_target(scmd->device) == starget))
                        stgt_priv_data->pend_count++;
        }

out:
        return true;
}
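
/*
 * Sketch of how the two counting iterators above are typically driven
 * (illustrative; the actual call sites are elsewhere in this file):
 * blk_mq_tagset_busy_iter() invokes the callback once per in-flight
 * request, so counting I/Os pending in the LLD for a target looks like
 *
 *   stgt_priv_data->pend_count = 0;
 *   blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
 *       mpi3mr_count_tgt_pending, (void *)starget);
 */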
/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
        struct Scsi_Host *shost = mrioc->shost;

        mrioc->flush_io_count = 0;
        ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
        blk_mq_tagset_busy_iter(&shost->tag_set,
            mpi3mr_flush_scmd, (void *)mrioc);
        ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
            mrioc->flush_io_count);
}
/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
        struct Scsi_Host *shost = mrioc->shost;
        int i;

        if (!mrioc->unrecoverable)
                return;

        if (mrioc->op_reply_qinfo) {
                for (i = 0; i < mrioc->num_queues; i++) {
                        while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
                                udelay(500);
                        atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
                }
        }
        mrioc->flush_io_count = 0;
        blk_mq_tagset_busy_iter(&shost->tag_set,
            mpi3mr_flush_scmd, (void *)mrioc);
        mpi3mr_flush_delayed_cmd_lists(mrioc);
        mpi3mr_flush_drv_cmds(mrioc);
}
/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
        struct mpi3mr_tgt_dev *tgtdev;

        tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
        if (!tgtdev)
                return NULL;
        kref_init(&tgtdev->ref_count);
        return tgtdev;
}
/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdev to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_tgt_dev *tgtdev)
{
        unsigned long flags;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        mpi3mr_tgtdev_get(tgtdev);
        INIT_LIST_HEAD(&tgtdev->list);
        list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
        tgtdev->state = MPI3MR_DEV_CREATED;
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdev from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
        unsigned long flags;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
                if (!list_empty(&tgtdev->list)) {
                        list_del_init(&tgtdev->list);
                        tgtdev->state = MPI3MR_DEV_DELETED;
                        mpi3mr_tgtdev_put(tgtdev);
                }
        }
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
        struct mpi3mr_ioc *mrioc, u16 handle)
{
        struct mpi3mr_tgt_dev *tgtdev;

        assert_spin_locked(&mrioc->tgtdev_lock);
        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
                if (tgtdev->dev_handle == handle)
                        goto found_tgtdev;
        return NULL;

found_tgtdev:
        mpi3mr_tgtdev_get(tgtdev);
        return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
        struct mpi3mr_ioc *mrioc, u16 handle)
{
        struct mpi3mr_tgt_dev *tgtdev;
        unsigned long flags;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
        return tgtdev;
}
/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
        struct mpi3mr_ioc *mrioc, u16 persist_id)
{
        struct mpi3mr_tgt_dev *tgtdev;

        assert_spin_locked(&mrioc->tgtdev_lock);
        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
                if (tgtdev->perst_id == persist_id)
                        goto found_tgtdev;
        return NULL;

found_tgtdev:
        mpi3mr_tgtdev_get(tgtdev);
        return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
        struct mpi3mr_ioc *mrioc, u16 persist_id)
{
        struct mpi3mr_tgt_dev *tgtdev;
        unsigned long flags;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
        return tgtdev;
}
/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
        struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
        struct mpi3mr_tgt_dev *tgtdev;

        assert_spin_locked(&mrioc->tgtdev_lock);
        tgtdev = tgt_priv->tgt_dev;
        if (tgtdev)
                mpi3mr_tgtdev_get(tgtdev);
        return tgtdev;
}
/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set the io_divert flag for each device associated
 * with the given throttle group to the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
        unsigned long flags;
        struct mpi3mr_tgt_dev *tgtdev;
        struct mpi3mr_stgt_priv_data *tgt_priv;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
                if (tgtdev->starget && tgtdev->starget->hostdata) {
                        tgt_priv = tgtdev->starget->hostdata;
                        if (tgt_priv->throttle_group == tg)
                                tgt_priv->io_divert = divert_value;
                }
        }
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
        bool device_add)
{
        ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
            (device_add ? "addition" : "removal"));
        ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
        ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}
/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to the upper layers and,
 * if it is, removes the device from the upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_tgt_dev *tgtdev)
{
        struct mpi3mr_stgt_priv_data *tgt_priv;

        ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
            __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
        if (tgtdev->starget && tgtdev->starget->hostdata) {
                tgt_priv = tgtdev->starget->hostdata;
                atomic_set(&tgt_priv->block_io, 0);
                tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
        }

        if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
            MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
                if (tgtdev->starget) {
                        if (mrioc->current_event)
                                mrioc->current_event->pending_at_sml = 1;
                        scsi_remove_target(&tgtdev->starget->dev);
                        tgtdev->host_exposed = 0;
                        if (mrioc->current_event) {
                                mrioc->current_event->pending_at_sml = 0;
                                if (mrioc->current_event->discard) {
                                        mpi3mr_print_device_event_notice(mrioc,
                                            false);
                                        return;
                                }
                        }
                }
        } else
                mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
        mpi3mr_global_trigger(mrioc,
            MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED);

        ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
            __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}
/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to the upper layers
 * and, if it is not already exposed, exposes the device to the
 * upper layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
        u16 perst_id)
{
        int retval = 0;
        struct mpi3mr_tgt_dev *tgtdev;

        if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
                return -1;

        tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
        if (!tgtdev) {
                retval = -1;
                goto out;
        }
        if (tgtdev->is_hidden || tgtdev->host_exposed) {
                retval = -1;
                goto out;
        }
        if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
            MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
                tgtdev->host_exposed = 1;
                if (mrioc->current_event)
                        mrioc->current_event->pending_at_sml = 1;
                scsi_scan_target(&mrioc->shost->shost_gendev,
                    mrioc->scsi_device_channel, tgtdev->perst_id,
                    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
                if (!tgtdev->starget)
                        tgtdev->host_exposed = 0;
                if (mrioc->current_event) {
                        mrioc->current_event->pending_at_sml = 0;
                        if (mrioc->current_event->discard) {
                                mpi3mr_print_device_event_notice(mrioc, true);
                                goto out;
                        }
                }
        } else
                mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
        if (tgtdev)
                mpi3mr_tgtdev_put(tgtdev);

        return retval;
}
/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
        int q_depth)
{
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
        int retval = 0;

        if (!sdev->tagged_supported)
                q_depth = 1;
        if (q_depth > shost->can_queue)
                q_depth = shost->can_queue;
        else if (!q_depth)
                q_depth = MPI3MR_DEFAULT_SDEV_QD;
        retval = scsi_change_queue_depth(sdev, q_depth);
        sdev->max_queue_depth = sdev->queue_depth;

        return retval;
}
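
/*
 * Example behavior of the clamping above (assumed shost->can_queue = 1024):
 *
 *   mpi3mr_change_queue_depth(sdev, 0);     // -> MPI3MR_DEFAULT_SDEV_QD
 *   mpi3mr_change_queue_depth(sdev, 4096);  // -> clamped to 1024
 *
 * Untagged devices are always forced to a queue depth of 1.
 */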
static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
        struct queue_limits *lim)
{
        u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;

        lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
        lim->virt_boundary_mask = (1 << pgsz) - 1;
}
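
/*
 * Worked example (assumed values): for an NVMe device reporting
 * mdts = 1 MiB and a page size exponent pgsz = 12, the limits above become
 *
 *   lim->max_hw_sectors     = (1024 * 1024) / 512 = 2048 (512-byte sectors)
 *   lim->virt_boundary_mask = (1 << 12) - 1       = 0xfff
 *
 * so no SG element may straddle a 4 KiB boundary.
 */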
static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
        struct queue_limits *lim)
{
        if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
            (tgt_dev->dev_spec.pcie_inf.dev_info &
             MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
             MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
                mpi3mr_configure_nvme_dev(tgt_dev, lim);
}
/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
        struct mpi3mr_tgt_dev *tgtdev;
        struct queue_limits lim;

        tgtdev = (struct mpi3mr_tgt_dev *)data;
        if (!tgtdev)
                return;

        mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);

        lim = queue_limits_start_update(sdev->request_queue);
        mpi3mr_configure_tgt_dev(tgtdev, &lim);
        WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}

/**
 * mpi3mr_refresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
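	/*
	 * Three passes over the target device list: first flag devices
	 * that went missing (or hidden) across the reset and unblock
	 * their I/O, then remove or hide them from the upper layers,
	 * and finally expose or refresh the devices that are present.
	 */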
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
		     tgtdev->is_hidden) &&
		     tgtdev->host_exposed && tgtdev->starget &&
		     tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}

/**
 * mpi3mr_update_tgtdev - Update target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
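		/*
		 * Cap the cached MDTS at 1 MB regardless of what the
		 * device reports.
		 */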
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
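		/*
		 * The scale factor of 2048 converts the firmware-reported
		 * throttle-group thresholds to 512-byte block counts
		 * (assuming the firmware reports them in 1 MB units).
		 */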
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}

/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	ioc_info(mrioc,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for the enclosure device based on the handle and
 * returns the enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_process_trigger_data_event_bh - Process trigger event
 * data
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * This function releases diag buffers or issues a diag fault
 * based on trigger conditions.
 *
 * Return: Nothing
 */
static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
	struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
	unsigned long flags;
	int retval = 0;
	u8 trigger_type = event_data->trigger_type;
	union mpi3mr_trigger_data *trigger_data =
	    &event_data->trigger_specific_data;
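
	/*
	 * A snapdump trigger stamps the trigger data into the host diag
	 * buffers and escalates to a soft reset; release triggers only
	 * release the corresponding diag buffer back to the firmware and
	 * clear the matching trigger-active flag.
	 */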
	if (event_data->snapdump) {
		if (trace_hdb)
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		if (fw_hdb)
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_TRIGGER, 1);
		return;
	}

	if (trace_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->trace_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}

	if (fw_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->fw_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}

/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}

/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and add or remove
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
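	/*
	 * Track a new node when a present enclosure is seen for the
	 * first time; drop the cached node once the enclosure is
	 * reported absent, otherwise refresh its cached page 0.
	 */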
	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
			kzalloc(sizeof(struct mpi3mr_enclosure_node),
			    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}

/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
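			/*
			 * The link_rate byte packs the current rate in its
			 * upper nibble and the previous rate in its lower
			 * nibble; only a genuine rate change is propagated
			 * to the SAS transport layer.
			 */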
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}

/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_pcie_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 port_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->switch_status) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->switch_dev_handle),
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_port_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		port_number = event_data->start_port_num + i;
		reason_code = event_data->port_entry[i].port_status;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->port_entry[i].current_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->port_entry[i].previous_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		ioc_info(mrioc,
		    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, port_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}

/**
 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the PCIe topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
	    fwevt->event_data_size);
}

/**
 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
 * @sdev: SCSI device reference
 * @data: Queue depth reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the QD of each SCSI device.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
{
	u16 *q_depth = (u16 *)data;

	scsi_change_queue_depth(sdev, (int)*q_depth);
	sdev->max_queue_depth = sdev->queue_depth;
}

/**
 * mpi3mr_set_qd_for_all_vd_in_tg - Set QD for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to reduce QD for each device associated with the
 * given throttle group.
 *
 * Return: None.
 */
static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg) {
				dprint_event_bh(mrioc,
				    "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
				    tgt_priv->perst_id, tgtdev->q_depth,
				    tg->modified_qd);
				starget_for_each_device(tgtdev->starget,
				    (void *)&tg->modified_qd,
				    mpi3mr_update_sdev_qd);
			}
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Identifies the firmware event and calls the corresponding bottom
 * half handler and sends event acknowledgment if required.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_device_page0 *dev_pg0 = NULL;
	u16 perst_id, handle, dev_info;
	struct mpi3_device0_sas_sata_format *sasinf = NULL;
	unsigned int timeout;

	mpi3mr_fwevt_del_from_list(mrioc, fwevt);
	mrioc->current_event = fwevt;

	if (mrioc->stop_drv_processing)
		goto out;

	if (mrioc->unrecoverable) {
		dprint_event_bh(mrioc,
		    "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
		    fwevt->event_id);
		goto out;
	}

	if (!fwevt->process_evt)
		goto evt_ack;

	switch (fwevt->event_id) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
		perst_id = le16_to_cpu(dev_pg0->persistent_id);
		handle = le16_to_cpu(dev_pg0->dev_handle);
		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
			mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
		else if (mrioc->sas_transport_enabled &&
		    (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
			sasinf = &dev_pg0->device_specific.sas_sata_format;
			dev_info = le16_to_cpu(sasinf->device_info);
			if (!mrioc->sas_hba.num_phys)
				mpi3mr_sas_host_add(mrioc);
			else
				mpi3mr_sas_host_refresh(mrioc);

			if (mpi3mr_is_expander_device(dev_info))
				mpi3mr_expander_add(mrioc, handle);
		}
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
		perst_id = le16_to_cpu(dev_pg0->persistent_id);
		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
			mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	{
		mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_LOG_DATA:
	{
		mpi3mr_logdata_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
	{
		struct mpi3mr_throttle_group_info *tg;

		tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
		dprint_event_bh(mrioc,
		    "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
		    tg->id, tg->need_qd_reduction);
		if (tg->need_qd_reduction) {
			mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
			tg->need_qd_reduction = 0;
		}
		break;
	}
	case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
	{
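		/*
		 * Poll in 500 ms steps, bounded by 2 * MPI3MR_RESET_TIMEOUT
		 * iterations, until the post-reset device refresh finishes;
		 * mark the controller unrecoverable if it never does.
		 */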
		timeout = MPI3MR_RESET_TIMEOUT * 2;
		while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) &&
		    !mrioc->unrecoverable && !mrioc->pci_err_recovery) {
			msleep(500);
			if (!timeout--) {
				mrioc->unrecoverable = 1;
				break;
			}
		}

		if (mrioc->unrecoverable || mrioc->pci_err_recovery)
			break;

		dprint_event_bh(mrioc,
		    "scan for non responding and newly added devices after soft reset started\n");
		if (mrioc->sas_transport_enabled) {
			mpi3mr_refresh_sas_ports(mrioc);
			mpi3mr_refresh_expanders(mrioc);
		}
		mpi3mr_refresh_tgtdevs(mrioc);
		ioc_info(mrioc,
		    "scan for non responding and newly added devices after soft reset completed\n");
		break;
	}
	case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER:
	{
		mpi3mr_process_trigger_data_event_bh(mrioc,
		    (struct trigger_event_data *)fwevt->event_data);
		break;
	}
	default:
		break;
	}

evt_ack:
	if (fwevt->send_ack)
		mpi3mr_process_event_ack(mrioc, fwevt->event_id,
		    fwevt->evt_ctx);
out:
	/* Put fwevt reference count to neutralize kref_init increment */
	mpi3mr_fwevt_put(fwevt);
	mrioc->current_event = NULL;
}

/**
 * mpi3mr_fwevt_worker - Firmware event worker
 * @work: Work struct containing firmware event
 *
 * Extracts the firmware event and calls mpi3mr_fwevt_bh.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_worker(struct work_struct *work)
{
	struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
	    work);
	mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
	/*
	 * Put fwevt reference count after
	 * dequeuing it from worker queue
	 */
	mpi3mr_fwevt_put(fwevt);
}

/**
 * mpi3mr_create_tgtdev - Create and add a target device
 * @mrioc: Adapter instance reference
 * @dev_pg0: Device Page 0 data
 *
 * If the device specified by the device page 0 data is not
 * present in the driver's internal list, allocate the memory
 * for the device, populate the data and add to the list, else
 * update the device data. The key is persistent ID.
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure
 */
static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 perst_id = 0;
	unsigned long flags;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
		return retval;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (tgtdev)
		tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if (tgtdev) {
		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
		mpi3mr_tgtdev_put(tgtdev);
	} else {
		tgtdev = mpi3mr_alloc_tgtdev();
		if (!tgtdev)
			return -ENOMEM;
		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
		mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
	}

	return retval;
}

/**
 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
 * @mrioc: Adapter instance reference
 *
 * Flush pending commands in the delayed lists due to a
 * controller reset or driver removal as a cleanup.
 *
 * Return: Nothing
 */
void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
{
	struct delayed_dev_rmhs_node *_rmhs_node;
	struct delayed_evt_ack_node *_evtack_node;

	dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
	while (!list_empty(&mrioc->delayed_rmhs_list)) {
		_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		list_del(&_rmhs_node->list);
		kfree(_rmhs_node);
	}

	dprint_reset(mrioc, "flushing delayed event ack commands\n");
	while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
		_evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
		    struct delayed_evt_ack_node, list);
		list_del(&_evtack_node->list);
		kfree(_evtack_node);
	}
}

/**
 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or retries the removal handshake sequence
 * based on the IOU control request IOC status.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	ioc_info(mrioc,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
			drv_cmd->retry_count++;
			ioc_info(mrioc,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		ioc_err(mrioc,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		ioc_info(mrioc,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
	}

	if (!list_empty(&mrioc->delayed_rmhs_list)) {
		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		ioc_info(mrioc,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		list_del(&delayed_dev_rmhs->list);
		kfree(delayed_dev_rmhs);
		return;
	}

clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or issues an IO unit control request as
 * part of device removal or hidden acknowledgment handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_iounit_control_request iou_ctrl;
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	if (tm_reply)
		pr_info(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32_to_cpu(tm_reply->termination_count));

	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    mrioc->name, drv_cmd->dev_handle, cmd_idx);
	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.operation = drv_cmd->iou_rc;
	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
	    1);
	if (retval) {
		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    mrioc->name);
		goto clear_drv_cmd;
	}

	return;
clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 * @cmdparam: Internal command tracker
 * @iou_rc: IO unit reason code
 *
 * Issues a target reset TM to the firmware or adds it to a
 * pending list as part of device removal or hidden
 * acknowledgment handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	int retval = 0;
	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	u8 retrycount = 5;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
		tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if (drv_cmd)
		goto issue_cmd;
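	/*
	 * Claim a free device-removal command slot from the bitmap; the
	 * find/test_and_set pair can race with other contexts, so retry a
	 * few times before queueing the handshake on the delayed list.
	 */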
	do {
		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
		    GFP_ATOMIC);
		if (!delayed_dev_rmhs)
			return;
		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		list_add_tail(&delayed_dev_rmhs->list,
		    &mrioc->delayed_rmhs_list);
		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);
		return;
	}
	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	ioc_info(mrioc,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	set_bit(handle, mrioc->removepend_bitmap);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_complete_evt_ack - event ack request completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is the completion handler for non-blocking event
 * acknowledgment sent to the firmware and it will issue any
 * pending event acknowledgment request.
 *
 * Return: Nothing
 */
static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_event_th(mrioc,
		    "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    drv_cmd->ioc_loginfo);
	}

	if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
		delayed_evtack =
			list_entry(mrioc->delayed_evtack_cmds_list.next,
			    struct delayed_evt_ack_node, list);
		mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
		    delayed_evtack->event_ctx);
		list_del(&delayed_evtack->list);
		kfree(delayed_evtack);
		return;
	}
clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
}

/**
 * mpi3mr_send_event_ack - Issue event acknowledgment request
 * @mrioc: Adapter instance reference
 * @event: MPI3 event id
 * @cmdparam: Internal command tracker
 * @event_ctx: event context
 *
 * Issues an event acknowledgment request to the firmware if there
 * is a free command to send the event ack, else it is added to a
 * pending list so that it will be processed on completion of a
 * prior event acknowledgment.
 *
 * Return: Nothing
 */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;
	u8 retrycount = 5;
	u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	if (drv_cmd) {
		dprint_event_th(mrioc,
		    "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
		    event, event_ctx);
		goto issue_cmd;
	}
	dprint_event_th(mrioc,
	    "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
	    event, event_ctx);
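	/*
	 * Same slot-allocation scheme as the device-removal handshake:
	 * race-tolerant bitmap claim with a bounded retry, falling back
	 * to the delayed event-ack list when every slot is busy.
	 */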
	do {
		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!test_and_set_bit(cmd_idx,
			    mrioc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = kzalloc(sizeof(*delayed_evtack),
		    GFP_ATOMIC);
		if (!delayed_evtack)
			return;
		INIT_LIST_HEAD(&delayed_evtack->list);
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		list_add_tail(&delayed_evtack->list,
		    &mrioc->delayed_evtack_cmds_list);
		dprint_event_th(mrioc,
		    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
		    event, event_ctx);
		return;
	}
	drv_cmd = &mrioc->evtack_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		dprint_event_th(mrioc,
		    "sending event ack failed due to command in use\n");
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		dprint_event_th(mrioc,
		    "posting event ack request is failed\n");
		goto out_failed;
	}

	dprint_event_th(mrioc,
	    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
	    event, event_ctx);
out:
	return;
out_failed:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
}

/**
 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove with the firmware for
 * PCIe devices.
 *
 * Return: Nothing
 */
static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_pcie_topology_change_list *topo_evt =
	    (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;

	for (i = 0; i < topo_evt->num_entries; i++) {
		handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		reason_code = topo_evt->port_entry[i].port_status;
		scsi_tgt_priv_data = NULL;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removed = 1;
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_set(&scsi_tgt_priv_data->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removedelay = 1;
				atomic_inc(&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			if (scsi_tgt_priv_data &&
			    scsi_tgt_priv_data->dev_removedelay) {
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_dec_if_positive
				    (&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove with the firmware for
 * SAS/SATA devices.
 *
 * Return: Nothing
 */
static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_sas_topology_change_list *topo_evt =
	    (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;

	for (i = 0; i < topo_evt->num_entries; i++) {
		handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		reason_code = topo_evt->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		scsi_tgt_priv_data = NULL;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removed = 1;
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_set(&scsi_tgt_priv_data->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removedelay = 1;
				atomic_inc(&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			if (scsi_tgt_priv_data &&
			    scsi_tgt_priv_data->dev_removedelay) {
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_dec_if_positive
				    (&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}
/**
 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove/hide acknowledgment
 * with the firmware.
 *
 * Return: Nothing
 */
static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 dev_handle = 0;
	u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)event_reply->event_data;

	if (mrioc->stop_drv_processing)
		goto out;

	dev_handle = le16_to_cpu(evtdata->dev_handle);

	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
		block = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		hide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		remove = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
		ublock = 1;
		break;
	default:
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (hide)
		tgtdev->is_hidden = hide;
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		if (block)
			atomic_inc(&scsi_tgt_priv_data->block_io);
		if (delete)
			scsi_tgt_priv_data->dev_removed = 1;
		if (ublock)
			atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	}
	if (remove)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_REMOVE_DEVICE);
	if (hide)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_HIDDEN_ACK);

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
/**
 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Blocks and unblocks host level I/O based on the reason code
 *
 * Return: Nothing
 */
static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_prepare_for_reset *evtdata =
	    (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;

	if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
		dprint_event_th(mrioc,
		    "prepare for reset event top half with rc=start\n");
		if (mrioc->prepare_for_reset)
			return;
		mrioc->prepare_for_reset = 1;
		mrioc->prepare_for_reset_timeout_counter = 0;
	} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
		dprint_event_th(mrioc,
		    "prepare for reset top half with rc=abort\n");
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
		    le32_to_cpu(event_reply->event_context));
}
/**
 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identifies the new shutdown timeout value and updates it.
 *
 * Return: Nothing
 */
static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_energy_pack_change *evtdata =
	    (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
	u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);

	/* shutdown_timeout is unsigned, so only zero is invalid */
	if (!shutdown_timeout) {
		ioc_warn(mrioc,
		    "%s :Invalid Shutdown Timeout received = %d\n",
		    __func__, shutdown_timeout);
		return;
	}

	ioc_info(mrioc,
	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
	    __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
	mrioc->facts.shutdown_timeout = shutdown_timeout;
}
/**
 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Displays cable management event details.
 *
 * Return: Nothing
 */
static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_cable_management *evtdata =
	    (struct mpi3_event_data_cable_management *)event_reply->event_data;

	switch (evtdata->status) {
	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
	{
		ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
		    "Devices connected to this cable are not detected.\n"
		    "This cable requires %d mW of power.\n",
		    evtdata->receptacle_id,
		    le32_to_cpu(evtdata->active_cable_power_requirement));
		break;
	}
	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
	{
		ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
		    evtdata->receptacle_id);
		break;
	}
	default:
		break;
	}
}
/**
 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
 * @mrioc: Adapter instance reference
 *
 * Add driver specific event to make sure that the driver won't process the
 * events until all the devices are refreshed during soft reset.
 *
 * Return: Nothing
 */
void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	fwevt = mpi3mr_alloc_fwevt(0);
	if (!fwevt) {
		dprint_event_th(mrioc,
		    "failed to schedule bottom half handler for event(0x%02x)\n",
		    MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
		return;
	}
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = 0;
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
/**
 * mpi3mr_os_handle_events - Firmware event handler
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identifies whether the event has to be handled and acknowledged,
 * and either processes the event in the tophalf and/or schedules a
 * bottom half through mpi3mr_fwevt_worker().
 *
 * Return: Nothing
 */
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 evt_type, sz;
	struct mpi3mr_fwevt *fwevt = NULL;
	bool ack_req = 0, process_evt_bh = 0;

	if (mrioc->stop_drv_processing)
		return;

	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;
	evt_type = event_reply->event;
	mpi3mr_event_trigger(mrioc, event_reply->event);

	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *dev_pg0 =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
			ioc_err(mrioc,
			    "%s :Failed to add device in the device add event\n",
			    __func__);
		else
			process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		mpi3mr_preparereset_evt_th(mrioc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
	{
		mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	case MPI3_EVENT_LOG_DATA:
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
		    __func__, evt_type);
		break;
	}

	if (process_evt_bh || ack_req) {
		sz = event_reply->event_data_length * 4;
		fwevt = mpi3mr_alloc_fwevt(sz);
		if (!fwevt) {
			ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
			    __func__, __FILE__, __LINE__, __func__);
			return;
		}
		memcpy(fwevt->event_data, event_reply->event_data, sz);
		fwevt->mrioc = mrioc;
		fwevt->event_id = evt_type;
		fwevt->send_ack = ack_req;
		fwevt->process_evt = process_evt_bh;
		fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
	}
}
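/*
 * Sizing note for the bottom-half copy above: event_data_length counts
 * 32-bit dwords, hence the "* 4" when computing the byte size passed to
 * mpi3mr_alloc_fwevt(). For example, an event payload of 24 dwords
 * allocates and copies 96 bytes into fwevt->event_data.
 */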
/**
 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * Identifies the protection information flags from the SCSI
 * command and set appropriate flags in the MPI3 SCSI IO
 * request.
 *
 * Return: Nothing
 */
static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	u16 eedp_flags = 0;
	unsigned char prot_op = scsi_get_prot_op(scmd);

	switch (prot_op) {
	case SCSI_PROT_NORMAL:
		return;
	case SCSI_PROT_READ_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_READ_PASS:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_PASS:
		if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
			scsiio_req->sgl[0].eedp.application_tag_translation_mask =
			    0xffff;
		} else
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	default:
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
		eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
		eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
		    MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
		scsiio_req->cdb.eedp32.primary_reference_tag =
		    cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;

	eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;

	switch (scsi_prot_interval(scmd)) {
	case 512:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
		break;
	case 520:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
		break;
	case 4080:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
		break;
	case 4088:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
		break;
	case 4096:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
		break;
	case 4104:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
		break;
	case 4160:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
		break;
	default:
		break;
	}

	scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
	scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
}
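/*
 * Illustration of the interval-to-UDS mapping above: a disk formatted
 * with 4096-byte logical blocks plus 8 bytes of appended T10 PI
 * reports scsi_prot_interval(scmd) == 4096, which selects
 * MPI3_EEDP_UDS_4096. The other cases (512, 520, 4080, 4088, 4104,
 * 4160) cover the remaining user data sizes the MPI3 EEDP interface
 * defines; any other interval leaves user_data_size at its default.
 */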
/**
 * mpi3mr_build_sense_buffer - Map sense information
 * @desc: Sense type
 * @buf: Sense buffer to populate
 * @key: Sense key
 * @asc: Additional sense code
 * @ascq: Additional sense code qualifier
 *
 * Maps the given sense information into either descriptor or
 * fixed format sense data.
 *
 * Return: Nothing
 */
static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
	u8 asc, u8 ascq)
{
	if (desc) {
		buf[0] = 0x72;	/* descriptor, current */
		buf[1] = key;
		buf[2] = asc;
		buf[3] = ascq;
		buf[7] = 0;
	} else {
		buf[0] = 0x70;	/* fixed, current */
		buf[2] = key;
		buf[7] = 0xa;
		buf[12] = asc;
		buf[13] = ascq;
	}
}
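/*
 * Worked example: mpi3mr_build_sense_buffer(0, buf, ILLEGAL_REQUEST,
 * 0x10, 0x01) fills fixed-format sense data as
 *
 *	buf[0]  = 0x70	(response code: fixed format, current error)
 *	buf[2]  = 0x05	(sense key: ILLEGAL REQUEST)
 *	buf[7]  = 0x0a	(additional sense length)
 *	buf[12] = 0x10	(ASC)
 *	buf[13] = 0x01	(ASCQ)
 *
 * which an initiator decodes as "logical block guard check failed";
 * see mpi3mr_map_eedp_error() below for where this is used.
 */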
/**
 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
 * @scmd: SCSI command reference
 * @ioc_status: status of MPI3 request
 *
 * Maps the EEDP error status of the SCSI IO request to sense
 * data.
 *
 * Return: Nothing
 */
static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
	u16 ioc_status)
{
	u8 ascq = 0;

	switch (ioc_status) {
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
		ascq = 0x01;
		break;
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		ascq = 0x02;
		break;
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
		ascq = 0x03;
		break;
	default:
		ascq = 0x00;
		break;
	}

	mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
	    0x10, ascq);
	scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
}
/**
 * mpi3mr_process_op_reply_desc - reply descriptor handler
 * @mrioc: Adapter instance reference
 * @reply_desc: Operational reply descriptor
 * @reply_dma: place holder for reply DMA address
 * @qidx: Operational queue index
 *
 * Process the operational reply descriptor and identifies the
 * descriptor type. Based on the descriptor map the MPI3 request
 * status to a SCSI command status and calls scsi_done call
 * back.
 *
 * Return: Nothing
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc = NULL;
	struct mpi3_address_reply_descriptor *addr_desc = NULL;
	struct mpi3_success_reply_descriptor *success_desc = NULL;
	struct mpi3_scsi_io_reply *scsi_reply = NULL;
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u8 *sense_buf = NULL;
	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
	u16 dev_handle = 0xFFFF;
	struct scsi_sense_hdr sshdr;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;
	u8 throttle_enabled_dev = 0;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
		    *reply_dma);
		if (!scsi_reply) {
			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
			    mrioc->name);
			goto out;
		}
		host_tag = le16_to_cpu(scsi_reply->host_tag);
		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
		scsi_status = scsi_reply->scsi_status;
		scsi_state = scsi_reply->scsi_state;
		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
		sense_count = le32_to_cpu(scsi_reply->sense_count);
		resp_data = le32_to_cpu(scsi_reply->response_data);
		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			panic("%s: Ran out of sense buffers\n", mrioc->name);
		if (sense_buf) {
			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
			mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
			    sshdr.asc, sshdr.ascq);
		}
		mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}
	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
	if (!scmd) {
		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
		    mrioc->name, host_tag);
		goto out;
	}
	priv = scsi_cmd_priv(scmd);

	data_len_blks = scsi_bufflen(scmd) >> 9;
	sdev_priv_data = scmd->device->hostdata;
	if (sdev_priv_data) {
		stgt_priv_data = sdev_priv_data->tgt_priv_data;
		if (stgt_priv_data) {
			tg = stgt_priv_data->throttle_group;
			throttle_enabled_dev =
			    stgt_priv_data->io_throttle_enabled;
			dev_handle = stgt_priv_data->dev_handle;
		}
	}
	if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
	    throttle_enabled_dev)) {
		ioc_pend_data_len = atomic_sub_return(data_len_blks,
		    &mrioc->pend_large_data_sz);
		if (tg) {
			tg_pend_data_len = atomic_sub_return(data_len_blks,
			    &tg->pend_large_data_sz);
			if (tg->io_divert && ((ioc_pend_data_len <=
			    mrioc->io_throttle_low) &&
			    (tg_pend_data_len <= tg->low))) {
				tg->io_divert = 0;
				mpi3mr_set_io_divert_for_all_vd_in_tg(
				    mrioc, tg, 0);
			}
		} else {
			if (ioc_pend_data_len <= mrioc->io_throttle_low)
				stgt_priv_data->io_divert = 0;
		}
	} else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
		ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
		if (!tg) {
			if (ioc_pend_data_len <= mrioc->io_throttle_low)
				stgt_priv_data->io_divert = 0;
		} else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
			tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
			if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
				tg->io_divert = 0;
				mpi3mr_set_io_divert_for_all_vd_in_tg(
				    mrioc, tg, 0);
			}
		}
	}

	if (success_desc) {
		scmd->result = DID_OK << 16;
		goto out_success;
	}

	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
	    sense_buf) {
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);

		memcpy(scmd->sense_buffer, sense_buf, sz);
	}
	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
			break;
		if (xfer_count < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		mpi3mr_map_eedp_error(scmd, ioc_status);
		break;
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INVALID_SGL:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	}

	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
	    (scmd->cmnd[0] != ATA_16) &&
	    mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
		    scmd->result);
		scsi_print_command(scmd);
		ioc_info(mrioc,
		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
		    __func__, dev_handle, ioc_status, ioc_loginfo,
		    priv->req_q_idx + 1);
		ioc_info(mrioc,
		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
		if (sense_buf) {
			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
			ioc_info(mrioc,
			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
			    __func__, sense_count, sshdr.sense_key,
			    sshdr.asc, sshdr.ascq);
		}
	}

out_success:
	if (priv->meta_sg_valid) {
		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
	}
	mpi3mr_clear_scmd_priv(mrioc, scmd);
	scsi_dma_unmap(scmd);
	scsi_done(scmd);
out:
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
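/*
 * Throttle accounting sketch for the completion path above:
 * data_len_blks is the transfer length in 512-byte units
 * (scsi_bufflen() >> 9), the same unit charged at submission time. A
 * 1 MiB read, for instance, contributes 2048 units to
 * pend_large_data_sz when queued, and the atomic_sub_return() here
 * credits the same 2048 back; io_divert is dropped only once the
 * pending totals fall to the io_throttle_low watermarks.
 */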
/**
 * mpi3mr_get_chain_idx - get free chain buffer index
 * @mrioc: Adapter instance reference
 *
 * Try to get a free chain buffer index from the free pool.
 *
 * Return: -1 on failure or the free chain buffer index
 */
static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
{
	u8 retry_count = 5;
	int cmd_idx = -1;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
	do {
		cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
		    mrioc->chain_buf_count);
		if (cmd_idx < mrioc->chain_buf_count) {
			set_bit(cmd_idx, mrioc->chain_bitmap);
			break;
		}
		cmd_idx = -1;
	} while (retry_count--);
	spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
	return cmd_idx;
}
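/*
 * Minimal usage sketch for the allocator above (illustrative; the real
 * caller is mpi3mr_prepare_sg_scmd() below): the index is stashed in
 * the command's private data so the chain buffer can be returned to
 * the pool when the I/O completes.
 *
 *	chain_idx = mpi3mr_get_chain_idx(mrioc);
 *	if (chain_idx < 0)
 *		return -1;		// pool exhausted after retries
 *	priv->chain_idx = chain_idx;	// released at I/O completion
 */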
/**
 * mpi3mr_prepare_sg_scmd - build scatter gather list
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function maps SCSI command's data and protection SGEs to
 * MPI request SGEs. If required additional 4K chain buffer is
 * used to send the SGEs.
 *
 * Return: 0 on success, -ENOMEM on dma_map_sg failure
 */
static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_length;
	int sges_left, chain_idx;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 last_chain_sgl_flags;
	struct chain_element *chain_req;
	struct scmd_priv *priv = NULL;
	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;

	priv = scsi_cmd_priv(scmd);

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	if (meta_sg)
		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
	else
		sg_local = &scsiio_req->sgl;

	if (!scsiio_req->data_length && !meta_sg) {
		mpi3mr_build_zero_len_sge(sg_local);
		return 0;
	}

	if (meta_sg) {
		sg_scmd = scsi_prot_sglist(scmd);
		sges_left = dma_map_sg(&mrioc->pdev->dev,
		    scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd),
		    scmd->sc_data_direction);
		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
	} else {
		/*
		 * Some firmware versions byte-swap the REPORT ZONES command
		 * reply from ATA-ZAC devices by directly accessing in the host
		 * buffer. This does not respect the default command DMA
		 * direction and causes IOMMU page faults on some architectures
		 * with an IOMMU enforcing write mappings (e.g. AMD hosts).
		 * Avoid such issue by making the REPORT ZONES buffer mapping
		 * bi-directional.
		 */
		if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
			scmd->sc_data_direction = DMA_BIDIRECTIONAL;
		sg_scmd = scsi_sglist(scmd);
		sges_left = scsi_dma_map(scmd);
	}

	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map failed: request for %d bytes!\n",
		    scsi_bufflen(scmd));
		return -ENOMEM;
	}
	if (sges_left > mrioc->max_sgl_entries) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map returned unsupported sge count %d!\n",
		    sges_left);
		return -ENOMEM;
	}

	sges_in_segment = (mrioc->facts.op_req_sz -
	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);

	if (scsiio_req->sgl[0].eedp.flags ==
	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
		sg_local += sizeof(struct mpi3_sge_common);
		sges_in_segment--;
		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
	}

	if (scsiio_req->msg_flags ==
	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
		sges_in_segment--;
		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
	}

	if (meta_sg)
		sges_in_segment = 1;

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
		sges_in_segment--;
	}

	chain_idx = mpi3mr_get_chain_idx(mrioc);
	if (chain_idx < 0)
		return -1;
	chain_req = &mrioc->chain_sgl_list[chain_idx];
	if (meta_sg)
		priv->meta_chain_idx = chain_idx;
	else
		priv->chain_idx = chain_idx;

	chain = chain_req->addr;
	chain_dma = chain_req->dma_addr;
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);

	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
	}

	return 0;
}
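/*
 * Sizing sketch for sges_in_segment above, with illustrative numbers:
 * if facts.op_req_sz were 512 bytes and the SGL started 128 bytes into
 * the request frame, the main frame would hold (512 - 128) / 16 = 24
 * SGEs (sizeof(struct mpi3_sge_common) is 16). One slot is given up
 * when sgl[0] carries an EEDP extended SGE and one more when the last
 * slot is reserved for the meta SGL; anything beyond the remainder
 * spills into a chain buffer obtained via mpi3mr_get_chain_idx().
 */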
/**
 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function calls mpi3mr_prepare_sg_scmd for constructing
 * both data SGEs and protection information SGEs in the MPI
 * format from the SCSI Command as appropriate.
 *
 * Return: return value of mpi3mr_prepare_sg_scmd.
 */
static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	int ret;

	ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
	if (ret)
		return ret;

	if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
		/* There is a valid meta sg */
		scsiio_req->flags |=
		    cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
		ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
	}

	return ret;
}
/**
 * mpi3mr_tm_response_name - get TM response as a string
 * @resp_code: TM response code
 *
 * Convert known task management response code as a readable
 * string.
 *
 * Return: response code string.
 */
static const char *mpi3mr_tm_response_name(u8 resp_code)
{
	char *desc;

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
		desc = "invalid LUN";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
		desc = "overlapped tag attempted";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
		desc = "task management request denied by NVMe device";
		break;
	default:
		desc = "unknown";
		break;
	}

	return desc;
}
inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
{
	int i;
	int num_of_reply_queues =
	    mrioc->num_op_reply_q + mrioc->op_reply_q_offset;

	for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
		mpi3mr_process_op_reply_q(mrioc,
		    mrioc->intr_info[i].op_reply_q);
}
/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @mrioc: Adapter instance reference
 * @tm_type: Task Management type
 * @handle: Device handle
 * @lun: lun ID
 * @htag: Host tag of the TM request
 * @timeout: TM timeout value
 * @drv_cmd: Internal command tracker
 * @resp_code: Response code place holder
 * @scmd: SCSI command
 *
 * Issues a Task Management Request to the controller for a
 * specified target, lun and command, waits for its completion
 * and checks the TM response. Recovers the TM if it timed out by
 * issuing a controller reset.
 *
 * Return: 0 on success, non-zero on errors
 */
int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
	u16 handle, uint lun, u16 htag, ulong timeout,
	struct mpi3mr_drv_cmd *drv_cmd,
	u8 *resp_code, struct scsi_cmnd *scmd)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct scmd_priv *cmd_priv = NULL;
	struct scsi_device *sdev = NULL;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;

	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
	    __func__, tm_type, handle);
	if (mrioc->unrecoverable) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
		    __func__);
		goto out;
	}

	memset(&tm_req, 0, sizeof(tm_req));
	mutex_lock(&drv_cmd->mutex);
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}
	if (mrioc->block_on_pci_err) {
		retval = -1;
		dprint_tm(mrioc,
		    "sending task management failed due to pci error recovery in progress\n");
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = tm_type;
	tm_req.host_tag = cpu_to_le16(htag);

	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);

	if (scmd) {
		sdev = scmd->device;
		sdev_priv_data = sdev->hostdata;
		scsi_tgt_priv_data = ((sdev_priv_data) ?
		    sdev_priv_data->tgt_priv_data : NULL);
	} else {
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
	}

	if (scsi_tgt_priv_data)
		atomic_inc(&scsi_tgt_priv_data->block_io);

	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}
	init_completion(&drv_cmd->done);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));

	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			dprint_tm(mrioc,
			    "task management request timed out after %ld seconds\n",
			    timeout);
			if (mrioc->logging_level & MPI3_DEBUG_TM)
				dprint_dump_req(&tm_req, sizeof(tm_req) / 4);
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
		dprint_tm(mrioc, "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}

	tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		*resp_code = le32_to_cpu(tm_reply->response_data) &
		    MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		*resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		dprint_tm(mrioc,
		    "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
		    handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	switch (*resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	dprint_tm(mrioc,
	    "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
	    tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
	    le32_to_cpu(tm_reply->termination_count),
	    mpi3mr_tm_response_name(*resp_code), *resp_code);

	if (!retval) {
		mpi3mr_ioc_disable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
		mpi3mr_ioc_enable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
		mpi3mr_process_admin_reply_q(mrioc);
	}
	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (!scsi_tgt_priv_data)
			break;
		scsi_tgt_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_tgt_pending,
		    (void *)scsi_tgt_priv_data->starget);
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		if (!sdev_priv_data)
			break;
		sdev_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_dev_pending, (void *)sdev);
		break;
	default:
		break;
	}
	mpi3mr_global_trigger(mrioc,
	    MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED);

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&drv_cmd->mutex);
	if (scsi_tgt_priv_data)
		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
out:
	return retval;
}
/**
 * mpi3mr_bios_param - BIOS param callback
 * @sdev: SCSI device reference
 * @bdev: Block device reference
 * @capacity: Capacity in logical sectors
 * @params: Parameter array
 *
 * Computes the BIOS geometry parameters (heads/sectors/cylinders).
 *
 * Return: 0 always
 */
static int mpi3mr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int params[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	ulong dummy;

	heads = 64;
	sectors = 32;

	dummy = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, dummy);

	if ((ulong)capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		dummy = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, dummy);
	}

	params[0] = heads;
	params[1] = sectors;
	params[2] = cylinders;
	return 0;
}
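/*
 * Worked example for the geometry above (values in 512-byte sectors):
 * a 1 GiB disk has capacity 0x200000 (2097152) sectors, so the
 * large-disk branch applies and cylinders = 2097152 / (255 * 63) = 130.
 * A disk just below the threshold keeps 64 heads x 32 sectors; e.g.
 * 0x1ff800 sectors gives 2095104 / 2048 = 1023 cylinders.
 */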
/**
 * mpi3mr_map_queues - Map queues callback handler
 * @shost: SCSI host reference
 *
 * Maps default and poll queues.
 *
 * Return: Nothing
 */
static void mpi3mr_map_queues(struct Scsi_Host *shost)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int i, qoff, offset;
	struct blk_mq_queue_map *map = NULL;

	offset = mrioc->op_reply_q_offset;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = mrioc->default_qcount;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = mrioc->active_poll_qcount;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}
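/*
 * Offset accounting sketch for the loop above, with illustrative
 * counts: with 16 default (interrupt-driven) queues and 2 poll queues,
 * HCTX_TYPE_DEFAULT covers hctx 0..15 at queue_offset 0 using PCI IRQ
 * affinity, HCTX_TYPE_POLL covers hctx 16..17 at queue_offset 16 via
 * the generic CPU spread, and 'offset' keeps the vector index passed
 * to blk_mq_pci_map_queues() aligned with each map's first hardware
 * queue.
 */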
/**
 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
 * @mrioc: Adapter instance reference
 *
 * Calculate the pending I/Os for the controller and return.
 *
 * Return: Number of pending I/Os
 */
static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	uint pend_ios = 0;

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
	return pend_ios;
}

/**
 * mpi3mr_print_pending_host_io - print pending I/Os
 * @mrioc: Adapter instance reference
 *
 * Print number of pending I/Os and each I/O details prior to
 * reset for debug purpose.
 *
 * Return: Nothing
 */
static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_print_scmd, (void *)mrioc);
}
/**
 * mpi3mr_wait_for_host_io - block for I/Os to complete
 * @mrioc: Adapter instance reference
 * @timeout: time out in seconds
 *
 * Waits for pending I/Os for the given adapter to complete or
 * to hit the timeout.
 *
 * Return: Nothing
 */
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
{
	enum mpi3mr_iocstate iocstate;
	int i = 0;

	iocstate = mpi3mr_get_iocstate(mrioc);
	if (iocstate != MRIOC_STATE_READY)
		return;

	if (!mpi3mr_get_fw_pending_ios(mrioc))
		return;
	ioc_info(mrioc,
	    "%s :Waiting for %d seconds prior to reset for %d I/O\n",
	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));

	for (i = 0; i < timeout; i++) {
		if (!mpi3mr_get_fw_pending_ios(mrioc))
			break;
		iocstate = mpi3mr_get_iocstate(mrioc);
		if (iocstate != MRIOC_STATE_READY)
			break;
		msleep(1000);
	}

	ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
	    mpi3mr_get_fw_pending_ios(mrioc));
}
/**
 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
 * @wslen: write same max length
 *
 * Gets values of unmap, ndob and number of blocks from write
 * same scsi io and based on these values it sets divert IO flag
 * and reason for diverting IO to firmware.
 *
 * Return: Nothing
 */
static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
	u32 *scsiio_flags, u16 wslen)
{
	u8 unmap = 0, ndob = 0;
	u8 opcode = scmd->cmnd[0];
	u32 num_blocks = 0;
	u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);

	if (opcode == WRITE_SAME_16) {
		unmap = scmd->cmnd[1] & 0x08;
		ndob = scmd->cmnd[1] & 0x01;
		num_blocks = get_unaligned_be32(scmd->cmnd + 10);
	} else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
		unmap = scmd->cmnd[10] & 0x08;
		ndob = scmd->cmnd[10] & 0x01;
		num_blocks = get_unaligned_be32(scmd->cmnd + 28);
	} else
		return;

	if ((unmap) && (ndob) && (num_blocks > wslen)) {
		scsiio_req->msg_flags |=
		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
		*scsiio_flags |=
		    MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
	}
}
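/*
 * CDB reference for the decoding above: in WRITE SAME(16) (opcode
 * 0x93) byte 1 bit 3 is UNMAP, byte 1 bit 0 is NDOB, and bytes 10-13
 * hold the big-endian number of blocks; WRITE SAME(32) carries the
 * same flag bits in byte 10 and the block count in bytes 28-31. Only
 * an unmapping, no-data write same longer than the firmware's
 * advertised limit (wslen) is diverted to firmware.
 */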
/**
 * mpi3mr_eh_host_reset - Host reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issue controller reset
 *
 * Return: SUCCESS on successful reset, else FAILED
 */
static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	int retval = FAILED, ret;

	ret = mpi3mr_soft_reset_handler(mrioc,
	    MPI3MR_RESET_FROM_EH_HOS, 1);
	if (ret)
		goto out;

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "Host reset is %s for scmd(%p)\n",
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}
/**
 * mpi3mr_eh_bus_reset - Bus reset error handling callback
 * @scmd: SCSI command reference
 *
 * Checks whether pending I/Os are present for the RAID volume;
 * if not there's no need to reset the adapter.
 *
 * Return: SUCCESS on successful reset, else FAILED
 */
static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
	int retval = FAILED;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	sdev_priv_data = scmd->device->hostdata;
	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
		stgt_priv_data = sdev_priv_data->tgt_priv_data;
		dev_type = stgt_priv_data->dev_type;
	}

	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
		mpi3mr_wait_for_host_io(mrioc,
		    MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
		if (!mpi3mr_get_fw_pending_ios(mrioc)) {
			while (mrioc->reset_in_progress ||
			    mrioc->prepare_for_reset ||
			    mrioc->block_on_pci_err) {
				ssleep(1);
				if (!timeout--) {
					retval = FAILED;
					goto out;
				}
			}
			retval = SUCCESS;
			goto out;
		}
	}
	if (retval == FAILED)
		mpi3mr_print_pending_host_io(mrioc);
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "Bus reset is %s for scmd(%p)\n",
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	return retval;
}
/**
 * mpi3mr_eh_target_reset - Target reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issue Target reset Task Management and verify the scmd is
 * terminated successfully and return status accordingly.
 *
 * Return: SUCCESS on successful termination of the scmd, else
 * FAILED
 */
static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u16 dev_handle;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;

	sdev_printk(KERN_INFO, scmd->device,
	    "Attempting Target Reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "SCSI device is not available\n");
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;
	if (stgt_priv_data->dev_removed) {
		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);

		sdev_printk(KERN_INFO, scmd->device,
		    "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
		    mrioc->name, dev_handle);
		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
			retval = SUCCESS;
		else
			retval = FAILED;
		goto out;
	}
	sdev_printk(KERN_INFO, scmd->device,
	    "Target Reset is issued to handle(0x%04x)\n",
	    dev_handle);

	ret = mpi3mr_issue_tm(mrioc,
	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;

	if (stgt_priv_data->pend_count) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: target has %d pending commands, target reset is failed\n",
		    mrioc->name, stgt_priv_data->pend_count);
		goto out;
	}

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_eh_dev_reset - Device reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issue lun reset Task Management and verify the scmd is
 * terminated successfully and return status accordingly.
 *
 * Return: SUCCESS on successful termination of the scmd, else
 * FAILED
 */
static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u16 dev_handle;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;

	sdev_printk(KERN_INFO, scmd->device,
	    "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "SCSI device is not available\n");
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;
	if (stgt_priv_data->dev_removed) {
		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);

		sdev_printk(KERN_INFO, scmd->device,
		    "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
		    mrioc->name, dev_handle);
		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
			retval = SUCCESS;
		else
			retval = FAILED;
		goto out;
	}
	sdev_printk(KERN_INFO, scmd->device,
	    "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);

	ret = mpi3mr_issue_tm(mrioc,
	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;
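	/*
	 * Unlike target reset, success of a LUN reset is judged on the
	 * per-LUN pending command count rather than the per-target one.
	 */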
	if (sdev_priv_data->pend_count) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: device has %d pending commands, device(LUN) reset is failed\n",
		    mrioc->name, sdev_priv_data->pend_count);
		goto out;
	}
	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_scan_start - Scan start callback handler
 * @shost: SCSI host reference
 *
 * Issue port enable request asynchronously.
 *
 * Return: Nothing
 */
static void mpi3mr_scan_start(struct Scsi_Host *shost)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	mrioc->scan_started = 1;
	ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
	if (mpi3mr_issue_port_enable(mrioc, 1)) {
		ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
		mrioc->scan_started = 0;
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
	}
}

/**
 * mpi3mr_scan_finished - Scan finished callback handler
 * @shost: SCSI host reference
 * @time: Jiffies from the scan start
 *
 * Checks whether the port enable is completed, timed out or
 * failed and sets the scan status accordingly after initiating
 * any required recovery.
 *
 * Return: 1 on scan finished or timed out, 0 for in progress
 */
static int mpi3mr_scan_finished(struct Scsi_Host *shost,
	unsigned long time)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
	u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
		ioc_err(mrioc, "port enable failed due to fault or reset\n");
		mpi3mr_print_fault_info(mrioc);
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
		mrioc->scan_started = 0;
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	}

	if (time >= (pe_timeout * HZ)) {
		ioc_err(mrioc, "port enable failed due to time out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
		mrioc->scan_started = 0;
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	}

	if (mrioc->scan_started)
		return 0;
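	/*
	 * Port enable has finished (successfully or not): log the result,
	 * start the watchdog and unblock the BSG interface.
	 */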
	if (mrioc->scan_failed) {
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable is successfully completed\n");

	mpi3mr_start_watchdog(mrioc);
	mrioc->is_driver_loading = 0;
	mrioc->stop_bsgs = 0;
	return 1;
}

/**
 * mpi3mr_slave_destroy - Slave destroy callback handler
 * @sdev: SCSI device reference
 *
 * Cleanup and free per device(lun) private data.
 *
 * Return: Nothing.
 */
static void mpi3mr_slave_destroy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev = NULL;
	unsigned long flags;
	struct scsi_target *starget;
	struct sas_rphy *rphy = NULL;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	scsi_tgt_priv_data->num_luns--;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
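	/*
	 * The target device is looked up either by its persistent ID on
	 * the driver's SCSI device channel, or by SAS address and rphy
	 * when the SAS transport layer is enabled on channel 0.
	 */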
	if (starget->channel == mrioc->scsi_device_channel)
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
	}

	if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
		tgt_dev->starget = NULL;
	if (tgt_dev)
		mpi3mr_tgtdev_put(tgt_dev);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}

/**
 * mpi3mr_target_destroy - Target destroy callback handler
 * @starget: SCSI target reference
 *
 * Cleanup and free per target private data.
 *
 * Return: Nothing.
 */
static void mpi3mr_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;

	if (!starget->hostdata)
		return;

	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
	if (tgt_dev && (tgt_dev->starget == starget) &&
	    (tgt_dev->perst_id == starget->id))
		tgt_dev->starget = NULL;
	if (tgt_dev) {
		scsi_tgt_priv_data->tgt_dev = NULL;
		scsi_tgt_priv_data->perst_id = 0;
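		/*
		 * The double put below is intentional: it drops the lookup
		 * reference taken by __mpi3mr_get_tgtdev_from_tgtpriv()
		 * above and, presumably, the long-lived reference that was
		 * held through the target private data being torn down.
		 */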
		mpi3mr_tgtdev_put(tgt_dev);
		mpi3mr_tgtdev_put(tgt_dev);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	kfree(starget->hostdata);
	starget->hostdata = NULL;
}

/**
 * mpi3mr_device_configure - Slave configure callback handler
 * @sdev: SCSI device reference
 * @lim: queue limits
 *
 * Configure queue depth, max hardware sectors and virt boundary
 * as required.
 *
 * Return: 0 always.
 */
static int mpi3mr_device_configure(struct scsi_device *sdev,
	struct queue_limits *lim)
{
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_tgt_dev *tgt_dev = NULL;
	unsigned long flags;
	int retval = 0;
	struct sas_rphy *rphy = NULL;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if (starget->channel == mrioc->scsi_device_channel)
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	if (!tgt_dev)
		return -ENXIO;
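	/*
	 * Apply the device specific queue depth, error handling and
	 * request timeouts, and the queue limits derived from the
	 * firmware reported device parameters.
	 */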
	mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);

	sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
	blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);

	mpi3mr_configure_tgt_dev(tgt_dev, lim);
	mpi3mr_tgtdev_put(tgt_dev);
	return retval;
}

/**
 * mpi3mr_slave_alloc - Slave alloc callback handler
 * @sdev: SCSI device reference
 *
 * Allocate per device(lun) private data and initialize it.
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure.
 */
static int mpi3mr_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev = NULL;
	struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
	unsigned long flags;
	struct scsi_target *starget;
	int retval = 0;
	struct sas_rphy *rphy = NULL;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);

	if (starget->channel == mrioc->scsi_device_channel)
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
	}

	if (tgt_dev) {
		if (tgt_dev->starget == NULL)
			tgt_dev->starget = starget;
		mpi3mr_tgtdev_put(tgt_dev);
		retval = 0;
	} else {
		spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
		return -ENXIO;
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
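	/*
	 * Allocate and link the per-LUN private data; num_luns tracks how
	 * many LUNs still reference the target private data and is
	 * decremented in mpi3mr_slave_destroy().
	 */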
	scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
	if (!scsi_dev_priv_data)
		return -ENOMEM;

	scsi_dev_priv_data->lun_id = sdev->lun;
	scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
	sdev->hostdata = scsi_dev_priv_data;

	scsi_tgt_priv_data->num_luns++;

	return retval;
}

/**
 * mpi3mr_target_alloc - Target alloc callback handler
 * @starget: SCSI target reference
 *
 * Allocate per target private data and initialize it.
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure.
 */
static int mpi3mr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;
	int retval = 0;
	struct sas_rphy *rphy = NULL;

	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
	if (!scsi_tgt_priv_data)
		return -ENOMEM;

	starget->hostdata = scsi_tgt_priv_data;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if (starget->channel == mrioc->scsi_device_channel) {
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
		if (tgt_dev && !tgt_dev->is_hidden) {
			scsi_tgt_priv_data->starget = starget;
			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
			scsi_tgt_priv_data->tgt_dev = tgt_dev;
			tgt_dev->starget = starget;
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
			retval = 0;
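			/*
			 * Enable drive-assisted protection information
			 * handling when the PCIe device is an NVMe drive
			 * formatted with a protection information type
			 * other than 0.
			 */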
			if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
				scsi_tgt_priv_data->dev_nvme_dif = 1;
			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
			if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
				scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
		} else
			retval = -ENXIO;
	} else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
		if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
		    (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
			scsi_tgt_priv_data->starget = starget;
			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
			scsi_tgt_priv_data->tgt_dev = tgt_dev;
			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
			tgt_dev->starget = starget;
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
			retval = 0;
		} else
			retval = -ENXIO;
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	return retval;
}

/**
 * mpi3mr_check_return_unmap - Whether an unmap is allowed
 * @mrioc: Adapter instance reference
 * @scmd: SCSI Command reference
 *
 * The controller hardware cannot handle certain unmap commands
 * for NVMe drives. This routine checks for those, completes the
 * SCSI command with the proper status and sense data, and
 * returns true.
 *
 * Return: TRUE for a not allowed unmap, FALSE otherwise.
 */
static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	unsigned char *buf;
	u16 param_len, desc_len, trunc_param_len;

	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
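	/*
	 * On controllers with a non-zero PCI revision only fix up an
	 * unaligned parameter length and let the command through; the
	 * full parameter list validation below is needed only for
	 * revision 0 parts.
	 */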
	if (mrioc->pdev->revision) {
		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
			trunc_param_len -= (param_len - 8) & 0xF;
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
			dprint_scsi_err(mrioc,
			    "truncating param_len from (%d) to (%d)\n",
			    param_len, trunc_param_len);
			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
		}
		return false;
	}

	if (!param_len) {
		ioc_warn(mrioc,
		    "%s: cdb received with zero parameter length\n",
		    __func__);
		scsi_print_command(scmd);
		scmd->result = DID_OK << 16;
		scsi_done(scmd);
		return true;
	}

	if (param_len < 24) {
		ioc_warn(mrioc,
		    "%s: cdb received with invalid param_len: %d\n",
		    __func__, param_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}

	if (param_len != scsi_bufflen(scmd)) {
		ioc_warn(mrioc,
		    "%s: cdb received with param_len: %d bufflen: %d\n",
		    __func__, param_len, scsi_bufflen(scmd));
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}

	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf) {
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x55, 0x03);
		scsi_done(scmd);
		return true;
	}

	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
	desc_len = get_unaligned_be16(&buf[2]);

	if (desc_len < 16) {
		ioc_warn(mrioc,
		    "%s: Invalid descriptor length in param list: %d\n",
		    __func__, desc_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x26, 0);
		scsi_done(scmd);
		kfree(buf);
		return true;
	}

	if (param_len > (desc_len + 8)) {
		trunc_param_len = desc_len + 8;
		scsi_print_command(scmd);
		dprint_scsi_err(mrioc,
		    "truncating param_len(%d) to desc_len+8(%d)\n",
		    param_len, trunc_param_len);
		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
		scsi_print_command(scmd);
	}

	kfree(buf);
	return false;
}

/**
 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
 * @scmd: SCSI Command reference
 *
 * Checks whether a cdb is allowed during shutdown or not.
 *
 * Return: TRUE for allowed commands, FALSE otherwise.
 */
inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
{
	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
	case START_STOP:
		return true;
	default:
		return false;
	}
}

/**
 * mpi3mr_qcmd - I/O request dispatcher
 * @shost: SCSI Host reference
 * @scmd: SCSI Command reference
 *
 * Issues the SCSI Command as an MPI3 request.
 *
 * Return: 0 on successful queueing of the request or if the
 * request is completed with failure.
 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
 */
static int mpi3mr_qcmd(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct scmd_priv *scmd_priv_data = NULL;
	struct mpi3_scsi_io_request *scsiio_req = NULL;
	struct op_req_qinfo *op_req_q = NULL;
	int retval = 0;
	u16 dev_handle;
	u16 host_tag;
	u32 scsiio_flags = 0, data_len_blks = 0;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int iprio_class;
	u8 is_pcie_dev = 0;
	u32 tracked_io_sz = 0;
	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;

	if (mrioc->unrecoverable) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	if (mrioc->stop_drv_processing &&
	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;

	/* Avoid error handling escalation when device is removed or blocked */
	if (scmd->device->host->shost_state == SHOST_RECOVERY &&
	    scmd->cmnd[0] == TEST_UNIT_READY &&
	    (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) {
		scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
		scsi_done(scmd);
		goto out;
	}

	if (mrioc->reset_in_progress || mrioc->prepare_for_reset
	    || mrioc->block_on_pci_err) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	if (atomic_read(&stgt_priv_data->block_io)) {
		if (mrioc->stop_drv_processing) {
			scmd->result = DID_NO_CONNECT << 16;
			scsi_done(scmd);
			goto out;
		}
		retval = SCSI_MLQUEUE_DEVICE_BUSY;
		goto out;
	}

	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}
	if (stgt_priv_data->dev_removed) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
		is_pcie_dev = 1;
	if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
	    (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    mpi3mr_check_return_unmap(mrioc, scmd))
		goto out;

	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
	else
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;

	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;

	if (sdev_priv_data->ncq_prio_enable) {
		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (iprio_class == IOPRIO_CLASS_RT)
			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
	}

	if (scmd->cmd_len > 16)
		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	scmd_priv_data = scsi_cmd_priv(scmd);
	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
	scsiio_req->host_tag = cpu_to_le16(host_tag);

	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);

	if (stgt_priv_data->wslen)
		mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags,
		    stgt_priv_data->wslen);

	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
	scsiio_req->flags = cpu_to_le32(scsiio_flags);
	int_to_scsilun(sdev_priv_data->lun_id,
	    (struct scsi_lun *)scsiio_req->lun);

	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
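	/*
	 * Large I/O throttling: once the pending large I/O data (in 512
	 * byte blocks) crosses the controller or throttle group high
	 * watermark, divert subsequent I/Os to the firmware and, for
	 * throttle groups, trigger queue depth reduction.
	 */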
	data_len_blks = scsi_bufflen(scmd) >> 9;
	if ((data_len_blks >= mrioc->io_throttle_data_length) &&
	    stgt_priv_data->io_throttle_enabled) {
		tracked_io_sz = data_len_blks;
		tg = stgt_priv_data->throttle_group;
		if (tg) {
			ioc_pend_data_len = atomic_add_return(data_len_blks,
			    &mrioc->pend_large_data_sz);
			tg_pend_data_len = atomic_add_return(data_len_blks,
			    &tg->pend_large_data_sz);
			if (!tg->io_divert && ((ioc_pend_data_len >=
			    mrioc->io_throttle_high) ||
			    (tg_pend_data_len >= tg->high))) {
				tg->io_divert = 1;
				tg->need_qd_reduction = 1;
				mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
				    tg, 1);
				mpi3mr_queue_qd_reduction_event(mrioc, tg);
			}
		} else {
			ioc_pend_data_len = atomic_add_return(data_len_blks,
			    &mrioc->pend_large_data_sz);
			if (ioc_pend_data_len >= mrioc->io_throttle_high)
				stgt_priv_data->io_divert = 1;
		}
	}

	if (stgt_priv_data->io_divert) {
		scsiio_req->msg_flags |=
		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
		scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
	}
	scsiio_req->flags |= cpu_to_le32(scsiio_flags);

	if (mpi3mr_op_request_post(mrioc, op_req_q,
	    scmd_priv_data->mpi3mr_scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		if (tracked_io_sz) {
			atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
			if (tg)
				atomic_sub(tracked_io_sz,
				    &tg->pend_large_data_sz);
		}
		goto out;
	}

out:
	return retval;
}

static const struct scsi_host_template mpi3mr_driver_template = {
	.module = THIS_MODULE,
	.name = "MPI3 Storage Controller",
	.proc_name = MPI3MR_DRIVER_NAME,
	.queuecommand = mpi3mr_qcmd,
	.target_alloc = mpi3mr_target_alloc,
	.slave_alloc = mpi3mr_slave_alloc,
	.device_configure = mpi3mr_device_configure,
	.target_destroy = mpi3mr_target_destroy,
	.slave_destroy = mpi3mr_slave_destroy,
	.scan_finished = mpi3mr_scan_finished,
	.scan_start = mpi3mr_scan_start,
	.change_queue_depth = mpi3mr_change_queue_depth,
	.eh_device_reset_handler = mpi3mr_eh_dev_reset,
	.eh_target_reset_handler = mpi3mr_eh_target_reset,
	.eh_bus_reset_handler = mpi3mr_eh_bus_reset,
	.eh_host_reset_handler = mpi3mr_eh_host_reset,
	.bios_param = mpi3mr_bios_param,
	.map_queues = mpi3mr_map_queues,
	.mq_poll = mpi3mr_blk_mq_poll,
	.no_write_same = 1,
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES,
	/* max xfer supported is 1M (2K in 512 byte sized sectors) */
	.max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
	.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
	.max_segment_size = 0xffffffff,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct scmd_priv),
	.shost_groups = mpi3mr_host_groups,
	.sdev_groups = mpi3mr_dev_groups,
};

/**
 * mpi3mr_init_drv_cmd - Initialize internal command tracker
 * @cmdptr: Internal command tracker
 * @host_tag: Host tag used for the specific command
 *
 * Initialize the internal command tracker structure with
 * specified host tag.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
	u16 host_tag)
{
	mutex_init(&cmdptr->mutex);
	cmdptr->reply = NULL;
	cmdptr->state = MPI3MR_CMD_NOTUSED;
	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	cmdptr->host_tag = host_tag;
}

/**
 * osintfc_mrioc_security_status - Check controller secure status
 * @pdev: PCI device instance
 *
 * Read the Device Serial Number capability from PCI config
 * space and decide whether the controller is secure or not.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
osintfc_mrioc_security_status(struct pci_dev *pdev)
{
	u32 cap_data;
	int base;
	u32 ctlr_status;
	u32 debug_status;
	int retval = 0;

	base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!base) {
		dev_err(&pdev->dev,
		    "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
		return -1;
	}

	pci_read_config_dword(pdev, base + 4, &cap_data);

	debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
	ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;

	switch (ctlr_status) {
	case MPI3MR_INVALID_DEVICE:
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
		break;
	case MPI3MR_CONFIG_SECURE_DEVICE:
		if (!debug_status)
			dev_info(&pdev->dev,
			    "%s: Config secure ctlr is detected\n",
			    __func__);
		break;
	case MPI3MR_HARD_SECURE_DEVICE:
		break;
	case MPI3MR_TAMPERED_DEVICE:
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	if (!retval && debug_status) {
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
	}

	return retval;
}

/**
 * mpi3mr_probe - PCI probe callback
 * @pdev: PCI device instance
 * @id: PCI device ID details
 *
 * Controller initialization routine. Checks the security status
 * of the controller and, if it is invalid or tampered, returns
 * from the probe without initializing the controller. Otherwise,
 * allocates the per-adapter instance through shost_priv,
 * initializes controller specific data structures, initializes
 * the controller hardware and adds the shost to the SCSI
 * subsystem.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mpi3mr_ioc *mrioc = NULL;
	struct Scsi_Host *shost = NULL;
	int retval = 0, i;

	if (osintfc_mrioc_security_status(pdev)) {
		warn_non_secure_ctlr = 1;
		return 1; /* For Invalid and Tampered device */
	}

	shost = scsi_host_alloc(&mpi3mr_driver_template,
	    sizeof(struct mpi3mr_ioc));
	if (!shost) {
		retval = -ENODEV;
		goto shost_failed;
	}

	mrioc = shost_priv(shost);
	retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
	if (retval < 0)
		goto id_alloc_failed;
	mrioc->id = (u8)retval;
	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
	INIT_LIST_HEAD(&mrioc->list);
	spin_lock(&mrioc_list_lock);
	list_add_tail(&mrioc->list, &mrioc_list);
	spin_unlock(&mrioc_list_lock);

	spin_lock_init(&mrioc->admin_req_lock);
	spin_lock_init(&mrioc->reply_free_queue_lock);
	spin_lock_init(&mrioc->sbq_lock);
	spin_lock_init(&mrioc->fwevt_lock);
	spin_lock_init(&mrioc->tgtdev_lock);
	spin_lock_init(&mrioc->watchdog_lock);
	spin_lock_init(&mrioc->chain_buf_lock);
	spin_lock_init(&mrioc->sas_node_lock);
	spin_lock_init(&mrioc->trigger_lock);

	INIT_LIST_HEAD(&mrioc->fwevt_list);
	INIT_LIST_HEAD(&mrioc->tgtdev_list);
	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
	INIT_LIST_HEAD(&mrioc->sas_expander_list);
	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
	INIT_LIST_HEAD(&mrioc->enclosure_list);

	mutex_init(&mrioc->reset_mutex);
	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
		    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);

	if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    !pdev->revision)
		mrioc->enable_segqueue = false;
	else
		mrioc->enable_segqueue = true;

	init_waitqueue_head(&mrioc->reset_waitq);
	mrioc->logging_level = logging_level;
	mrioc->shost = shost;
	mrioc->pdev = pdev;
	mrioc->stop_bsgs = 1;

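	/*
	 * Clamp the max_sgl_entries module parameter between the default
	 * and the maximum, rounding in-range values down to a multiple of
	 * MPI3MR_DEFAULT_SGL_ENTRIES via the divide and multiply below.
	 */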
	mrioc->max_sgl_entries = max_sgl_entries;
	if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
		mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
	else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
		mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
	else {
		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
	}

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;
	shost->max_channel = 0;
	shost->max_id = 0xFFFFFFFF;
	shost->host_tagset = 1;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}
	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	shost->nr_hw_queues = mrioc->num_op_reply_q;
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = mrioc->max_sgl_entries;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	mpi3mr_bsg_init(mrioc);
	return retval;

addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
id_alloc_failed:
	scsi_host_put(shost);
shost_failed:
	return retval;
}

/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_hba_port *port, *hba_port_next;
	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mrioc->unrecoverable = 1;
	}

	if (!pci_device_is_present(mrioc->pdev) ||
	    mrioc->pci_err_recovery) {
		mrioc->unrecoverable = 1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}

	mpi3mr_bsg_exit(mrioc);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	if (mrioc->sas_transport_enabled)
		sas_remove_host(shost);
	else
		scsi_remove_host(shost);

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
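	/*
	 * Walk the expander list in reverse, dropping the lock around
	 * each removal since expander teardown interacts with the SAS
	 * transport layer and may sleep.
	 */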
	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
	    &mrioc->sas_expander_list, list) {
		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
		mpi3mr_expander_node_remove(mrioc, sas_expander);
		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	}
	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
		ioc_info(mrioc,
		    "removing hba_port entry: %p port: %d from hba_port list\n",
		    port, port->port_id);
		list_del(&port->list);
		kfree(port);
	}
	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);

	if (mrioc->sas_hba.num_phys) {
		kfree(mrioc->sas_hba.phy);
		mrioc->sas_hba.phy = NULL;
		mrioc->sas_hba.num_phys = 0;
	}

	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}

/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller.
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}

/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @dev: Device struct
 *
 * Change the power state to the given value and clean up the
 * IOC by issuing MUR and shutdown notification.
 *
 * Return: 0 always.
 */
static int __maybe_unused
mpi3mr_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
	    pdev, pci_name(pdev));
	mpi3mr_cleanup_resources(mrioc);

	return 0;
}

/**
 * mpi3mr_resume - PCI power management resume callback
 * @dev: Device struct
 *
 * Restore the power state to D0 and reinitialize the controller
 * and resume I/O operations to the target devices.
 *
 * Return: 0 on success, non-zero on failure
 */
static int __maybe_unused
mpi3mr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}

	mrioc->stop_drv_processing = 0;
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_memset_buffers(mrioc);
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
	scsi_unblock_requests(shost);
	mrioc->device_refresh_on = 0;
	mpi3mr_start_watchdog(mrioc);

	return 0;
}

/**
 * mpi3mr_pcierr_error_detected - PCI error detected callback
 * @pdev: PCI device instance
 * @state: channel state
 *
 * This function is called by the PCI error recovery driver and,
 * based on the state passed, decides what action to recommend
 * back to the PCI driver.
 *
 * For all of the states, if there is no valid mrioc or scsi host
 * reference in the PCI device, then this function will return
 * the result as disconnect.
 *
 * For the normal state, this function returns the result as can
 * recover.
 *
 * For the frozen state, this function blocks for any pending
 * controller initialization or re-initialization to complete,
 * stops any new interactions with the controller and returns
 * the status as reset required.
 *
 * For the permanent failure state, this function marks the
 * controller as unrecoverable and returns the status as
 * disconnect.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or
 * DISCONNECT based on the controller state.
 */
static pci_ers_result_t
mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__,
	    state);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
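		/*
		 * Block new interactions with the controller, wait up to
		 * MPI3MR_RESET_TIMEOUT seconds for an in-progress reset or
		 * driver load to finish, and only then request a reset;
		 * if the wait times out the controller is given up as
		 * unrecoverable.
		 */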
		mrioc->pci_err_recovery = true;
		mrioc->block_on_pci_err = true;
		do {
			if (mrioc->reset_in_progress || mrioc->is_driver_loading)
				ssleep(1);
			else
				break;
		} while (--timeout);

		if (!timeout) {
			mrioc->pci_err_recovery = true;
			mrioc->block_on_pci_err = true;
			mrioc->unrecoverable = 1;
			mpi3mr_stop_watchdog(mrioc);
			mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
			return PCI_ERS_RESULT_DISCONNECT;
		}

		scsi_block_requests(mrioc->shost);
		mpi3mr_stop_watchdog(mrioc);
		mpi3mr_cleanup_resources(mrioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		mrioc->pci_err_recovery = true;
		mrioc->block_on_pci_err = true;
		mrioc->unrecoverable = 1;
		mpi3mr_stop_watchdog(mrioc);
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_DISCONNECT;
	}
}

/**
 * mpi3mr_pcierr_slot_reset - Post slot reset callback
 * @pdev: PCI device instance
 *
 * This function is called by the PCI error recovery driver
 * after a slot or link reset issued by it for the recovery; the
 * driver is expected to bring back the controller and
 * initialize it.
 *
 * This function restores the PCI state and reinitializes the
 * controller resources and the controller; it blocks for any
 * pending reset to complete.
 *
 * Return: PCI_ERS_RESULT_DISCONNECT on failure or
 * PCI_ERS_RESULT_RECOVERED
 */
static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	do {
		if (mrioc->reset_in_progress)
			ssleep(1);
		else
			break;
	} while (--timeout);

	if (!timeout)
		goto out_failed;

	pci_restore_state(pdev);

	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		goto out_failed;
	}
	mrioc->unrecoverable = 0;
	mrioc->pci_err_recovery = false;

	if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0))
		goto out_failed;

	return PCI_ERS_RESULT_RECOVERED;
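	/*
	 * On failure the controller is marked unrecoverable, but the
	 * midlayer is unblocked and the watchdog restarted so that
	 * outstanding commands can be failed back cleanly.
	 */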
out_failed:
	mrioc->unrecoverable = 1;
	mrioc->block_on_pci_err = false;
	scsi_unblock_requests(shost);
	mpi3mr_start_watchdog(mrioc);
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * mpi3mr_pcierr_resume - PCI error recovery resume callback
 * @pdev: PCI device instance
 *
 * This function enables all I/O and IOCTLs post reset issued as
 * part of the PCI error recovery.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcierr_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mpi3mr_start_watchdog(mrioc);
	}
}

/**
 * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback
 * @pdev: PCI device instance
 *
 * This is called only if mpi3mr_pcierr_error_detected returns
 * PCI_ERS_RESULT_CAN_RECOVER.
 *
 * Return: PCI_ERS_RESULT_DISCONNECT when the controller is
 * unrecoverable or when the shost/mrioc reference cannot be
 * found, else return PCI_ERS_RESULT_RECOVERED
 */
static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	if (mrioc->unrecoverable)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_device_id mpi3mr_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
	},
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID)
	},
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);

static struct pci_error_handlers mpi3mr_err_handler = {
	.error_detected = mpi3mr_pcierr_error_detected,
	.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
	.slot_reset = mpi3mr_pcierr_slot_reset,
	.resume = mpi3mr_pcierr_resume,
};

static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);

static struct pci_driver mpi3mr_pci_driver = {
	.name = MPI3MR_DRIVER_NAME,
	.id_table = mpi3mr_pci_id_table,
	.probe = mpi3mr_probe,
	.remove = mpi3mr_remove,
	.shutdown = mpi3mr_shutdown,
	.err_handler = &mpi3mr_err_handler,
	.driver.pm = &mpi3mr_pm_ops,
};

static ssize_t event_counter_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
}
static DRIVER_ATTR_RO(event_counter);

static int __init mpi3mr_init(void)
{
	int ret_val;

	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
	    MPI3MR_DRIVER_VERSION);

	mpi3mr_transport_template =
	    sas_attach_transport(&mpi3mr_transport_functions);
	if (!mpi3mr_transport_template) {
		pr_err("%s failed to load due to sas transport attach failure\n",
		    MPI3MR_DRIVER_NAME);
		return -ENODEV;
	}

	ret_val = pci_register_driver(&mpi3mr_pci_driver);
	if (ret_val) {
		pr_err("%s failed to load due to pci register driver failure\n",
		    MPI3MR_DRIVER_NAME);
		goto err_pci_reg_fail;
	}

	ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	if (ret_val)
		goto err_event_counter;

	return ret_val;

err_event_counter:
	pci_unregister_driver(&mpi3mr_pci_driver);
err_pci_reg_fail:
	sas_release_transport(mpi3mr_transport_template);
	return ret_val;
}

static void __exit mpi3mr_exit(void)
{
	if (warn_non_secure_ctlr)
		pr_warn(
		    "Unloading %s version %s while managing a non secure controller\n",
		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
	else
		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
		    MPI3MR_DRIVER_VERSION);

	driver_remove_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	pci_unregister_driver(&mpi3mr_pci_driver);
	sas_release_transport(mpi3mr_transport_template);
	ida_destroy(&mrioc_ida);
}

module_init(mpi3mr_init);
module_exit(mpi3mr_exit);