lpfc_sli.c 685 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
2130221312213222133221342213522136221372213822139221402214122142221432214422145221462214722148221492215022151221522215322154221552215622157221582215922160221612216222163221642216522166221672216822169221702217122172221732217422175221762217722178221792218022181221822218322184221852218622187221882218922190221912219222193221942219522196221972219822199222002220122202222032220422205222062220722208222092221022211222122221322214222152221622217222182221922220222212222222223222242222522226222272222822229222302223122232222332223422235222362223722238222392224022241222422224322244222452224622247222482224922250222512225222253222542225522256222572225822259222602226122262222632226422265222662226722268222692227022271222722227322274222752227622277222782227922280222812228222283222842228522286222872228822289222902229122292222932229422295222962229722298222992230022301223022230322304223052230622307223082230922310223112231222313223142231522316223172231822319223202232122322223232232422325223262232722328223292233022331223322233322334223352233622337223382233922340223412234222343223442234522346223472234822349223502235122352223532235422355223562235722358223592236022361223622236322364223652236622367223682236922370223712237222373223742237522376223772237822379223802238122382223832238422385223862238722388223892239022391223922239322394223952239622397223982239922400224012240222403224042240522406224072240822409224102241122412224132241422415224162241722418224192242022421224222242322424224252242622427224282242922430224312243222433224342243522436224372243822439224402244122442224432244422445224462244722448224492245022451224522245322454224552245622457224582245922460224612246222463224642246522466224672246822469224702247122472224732247422475224762247722478224792248022481224822248322484224852248622487224882248922490224912249222493224942249522496224972249822499225002250122502225032250422505225062250722508225092251022511225122251322514225152251622517225182251922520225212252222523225242252522526225272252822529225302253122532225332253422535225362253722538225392254022541225422254322544225452254622547225482254922550225512255222553225542255522556225572255822559225602256122562225632256422565225662256722568225692257022571225722257322574225752257622577225782257922580225812258222583225842258522586225872258822589225902259122592225932259422595225962259722598225992260022601226022260322604226052260622607226082260922610226112261222613226142261522616226172261822619226202262122622226232262422625226262262722628226292263022631226322263322634226352263622637226382263922640226412264222643226442264522646226472264822649226502265122652226532265422655226562265722658226592266022661226622266322664226652266622667226682266922670226712267222673226742267522676226772267822679226802268122682226832268422685226862268722688226892269022691226922269322694226952269622697226982269922700227012270222703227042270522706227072270822709227102271122712227132271422715227162271722718227192272022721227222272322724227252272622727227282272922730227312273222733227342273522736227372273822739227402274122742227432274422745227462274722748227492275022751227522275322754227552275622757227582275922760227612276222763227642276522766227672276822769227702277122772227732277422775227762277722778227792278022781227822278322784227852278622787227882278922790227912279222793227942279522796227972279822799228002280122802228032280422805228062280722808228092281022811228122281322814
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe,
                                     enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
                                 struct lpfc_iocbq *pwqeq,
                                 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;
/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */
        /* Word 3 - cmd_buff_len, payload_offset_len is zero */
        /* Word 4 - total_xfer_len is variable */
        /* Word 5 - is zero */
        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */
        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

        /* Word 12 - is zero */
        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */
        /* Word 3 - cmd_buff_len, payload_offset_len is zero */
        /* Word 4 - total_xfer_len is variable */
        /* Word 5 - initial_xfer_len is variable */
        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */
        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

        /* Word 12 - is zero */
        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */
        /* Word 3 - payload_offset_len is variable */
        /* Word 4, 5 - is zero */
        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */
        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}
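
/*
 * Illustrative sketch only, not part of the driver: the templates built
 * above are meant to be copied into a per-IO WQE, after which the words
 * marked "variable" in the comments (BDE, xri_tag, reqtag, transfer length,
 * and so on) are patched for the specific command.  The helper name below
 * is hypothetical; the bf_set() accessors mirror the ones used in this file.
 */
#if 0
static void example_build_iread_wqe(union lpfc_wqe128 *wqe, u16 xri_tag,
                                    u16 reqtag, u32 total_xfer_len)
{
        /* Start from the static IREAD template ... */
        memcpy(wqe, &lpfc_iread_cmd_template, sizeof(*wqe));
        /* ... then fill in the per-command (variable) words. */
        wqe->fcp_iread.total_xfer_len = total_xfer_len;         /* Word 4 */
        bf_set(wqe_xri_tag, &wqe->fcp_iread.wqe_com, xri_tag);  /* Word 6 */
        bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, reqtag);    /* Word 9 */
}
#endif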
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
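
/*
 * Illustrative sketch only: on builds that cannot take the 64-bit
 * little-endian fast path above, the macro falls back to
 * lpfc_sli_pcimem_bcopy(), which conceptually copies one 32-bit word at a
 * time and byte-swaps each word into SLI (little-endian) order.  A minimal
 * stand-alone version, under that assumption, might look like this; the
 * helper name is hypothetical.
 */
#if 0
static void example_pcimem_bcopy_swap(void *srcp, void *destp, uint32_t cnt)
{
        uint32_t *src = srcp;
        uint32_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint32_t))
                *dest++ = le32_to_cpu(*src++);
}
#endif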
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if the queue is invalid, and -EBUSY if no entries are
 * available on @q.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
  215. static int
  216. lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
  217. {
  218. union lpfc_wqe *temp_wqe;
  219. struct lpfc_register doorbell;
  220. uint32_t host_index;
  221. uint32_t idx;
  222. uint32_t i = 0;
  223. uint8_t *tmp;
  224. u32 if_type;
  225. /* sanity check on queue memory */
  226. if (unlikely(!q))
  227. return -ENOMEM;
  228. temp_wqe = lpfc_sli4_qe(q, q->host_index);
  229. /* If the host has not yet processed the next entry then we are done */
  230. idx = ((q->host_index + 1) % q->entry_count);
  231. if (idx == q->hba_index) {
  232. q->WQ_overflow++;
  233. return -EBUSY;
  234. }
  235. q->WQ_posted++;
  236. /* set consumption flag every once in a while */
  237. if (!((q->host_index + 1) % q->notify_interval))
  238. bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
  239. else
  240. bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
  241. if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
  242. bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
  243. lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
  244. if (q->dpp_enable && q->phba->cfg_enable_dpp) {
  245. /* write to DPP aperture taking advantage of Combined Writes */
  246. tmp = (uint8_t *)temp_wqe;
  247. #ifdef __raw_writeq
  248. for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
  249. __raw_writeq(*((uint64_t *)(tmp + i)),
  250. q->dpp_regaddr + i);
  251. #else
  252. for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
  253. __raw_writel(*((uint32_t *)(tmp + i)),
  254. q->dpp_regaddr + i);
  255. #endif
  256. }
  257. /* ensure WQE bcopy and DPP flushed before doorbell write */
  258. wmb();
  259. /* Update the host index before invoking device */
  260. host_index = q->host_index;
  261. q->host_index = idx;
  262. /* Ring Doorbell */
  263. doorbell.word0 = 0;
  264. if (q->db_format == LPFC_DB_LIST_FORMAT) {
  265. if (q->dpp_enable && q->phba->cfg_enable_dpp) {
  266. bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
  267. bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
  268. bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
  269. q->dpp_id);
  270. bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
  271. q->queue_id);
  272. } else {
  273. bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
  274. bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
  275. /* Leave bits <23:16> clear for if_type 6 dpp */
  276. if_type = bf_get(lpfc_sli_intf_if_type,
  277. &q->phba->sli4_hba.sli_intf);
  278. if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
  279. bf_set(lpfc_wq_db_list_fm_index, &doorbell,
  280. host_index);
  281. }
  282. } else if (q->db_format == LPFC_DB_RING_FORMAT) {
  283. bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
  284. bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
  285. } else {
  286. return -EINVAL;
  287. }
  288. writel(doorbell.word0, q->db_regaddr);
  289. return 0;
  290. }
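/*
 * Illustrative sketch (not part of the driver source): a typical caller
 * builds a 128-byte WQE, takes the hbalock as noted in the header comment,
 * and posts it with lpfc_sli4_wq_put(), treating -EBUSY as "WQ full".
 * The helper name and the wq argument are hypothetical.
 */
#if 0
static int example_post_wqe(struct lpfc_hba *phba, struct lpfc_queue *wq,
			    union lpfc_wqe128 *wqe)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_wq_put(wq, wqe);	/* 0 on success, -EBUSY if full */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc;
}
#endif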
  291. /**
  292. * lpfc_sli4_wq_release - Updates internal hba index for WQ
  293. * @q: The Work Queue to operate on.
  294. * @index: The index to advance the hba index to.
  295. *
  296. * This routine will update the HBA index of a queue to reflect consumption of
  297. * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
  298. * an entry the host calls this function to update the queue's internal
  299. * pointers.
  300. **/
  301. static void
  302. lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
  303. {
  304. /* sanity check on queue memory */
  305. if (unlikely(!q))
  306. return;
  307. q->hba_index = index;
  308. }
  309. /**
  310. * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
  311. * @q: The Mailbox Queue to operate on.
  312. * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
  313. *
  314. * This routine will copy the contents of @mqe to the next available entry on
  315. * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
  316. * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
  317. * successful. If no entries are available on @q then this function will return
  318. * -ENOMEM.
  319. * The caller is expected to hold the hbalock when calling this routine.
  320. **/
  321. static uint32_t
  322. lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
  323. {
  324. struct lpfc_mqe *temp_mqe;
  325. struct lpfc_register doorbell;
  326. /* sanity check on queue memory */
  327. if (unlikely(!q))
  328. return -ENOMEM;
  329. temp_mqe = lpfc_sli4_qe(q, q->host_index);
  330. /* If the host has not yet processed the next entry then we are done */
  331. if (((q->host_index + 1) % q->entry_count) == q->hba_index)
  332. return -ENOMEM;
  333. lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
  334. /* Save off the mailbox pointer for completion */
  335. q->phba->mbox = (MAILBOX_t *)temp_mqe;
  336. /* Update the host index before invoking device */
  337. q->host_index = ((q->host_index + 1) % q->entry_count);
  338. /* Ring Doorbell */
  339. doorbell.word0 = 0;
  340. bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
  341. bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
  342. writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
  343. return 0;
  344. }
  345. /**
  346. * lpfc_sli4_mq_release - Updates internal hba index for MQ
  347. * @q: The Mailbox Queue to operate on.
  348. *
  349. * This routine will update the HBA index of a queue to reflect consumption of
  350. * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
  351. * an entry the host calls this function to update the queue's internal
  352. * pointers. This routine returns the number of entries that were consumed by
  353. * the HBA.
  354. **/
  355. static uint32_t
  356. lpfc_sli4_mq_release(struct lpfc_queue *q)
  357. {
  358. /* sanity check on queue memory */
  359. if (unlikely(!q))
  360. return 0;
  361. /* Clear the mailbox pointer for completion */
  362. q->phba->mbox = NULL;
  363. q->hba_index = ((q->hba_index + 1) % q->entry_count);
  364. return 1;
  365. }
  366. /**
  367. * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
  368. * @q: The Event Queue to get the first valid EQE from
  369. *
  370. * This routine will get the first valid Event Queue Entry from @q, update
  371. * the queue's internal hba index, and return the EQE. If no valid EQEs are in
  372. * the Queue (no more work to do), or the Queue is full of EQEs that have been
  373. * processed, but not popped back to the HBA then this routine will return NULL.
  374. **/
  375. static struct lpfc_eqe *
  376. lpfc_sli4_eq_get(struct lpfc_queue *q)
  377. {
  378. struct lpfc_eqe *eqe;
  379. /* sanity check on queue memory */
  380. if (unlikely(!q))
  381. return NULL;
  382. eqe = lpfc_sli4_qe(q, q->host_index);
  383. /* If the next EQE is not valid then we are done */
  384. if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
  385. return NULL;
  386. /*
  387. * insert barrier for instruction interlock : data from the hardware
  388. * must have the valid bit checked before it can be copied and acted
  389. * upon. Speculative instructions were allowing a bcopy at the start
  390. * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
  391. * after our return, to copy data before the valid bit check above
  392. * was done. As such, some of the copied data was stale. The barrier
  393. * ensures the check is before any data is copied.
  394. */
  395. mb();
  396. return eqe;
  397. }
  398. /**
  399. * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
  400. * @q: The Event Queue to disable interrupts
  401. *
  402. **/
  403. void
  404. lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
  405. {
  406. struct lpfc_register doorbell;
  407. doorbell.word0 = 0;
  408. bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
  409. bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
  410. bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
  411. (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
  412. bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
  413. writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
  414. }
  415. /**
  416. * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
  417. * @q: The Event Queue to disable interrupts
  418. *
  419. **/
  420. void
  421. lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
  422. {
  423. struct lpfc_register doorbell;
  424. doorbell.word0 = 0;
  425. bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
  426. writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
  427. }
  428. /**
  429. * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
  430. * @phba: adapter with EQ
  431. * @q: The Event Queue that the host has completed processing for.
  432. * @count: Number of elements that have been consumed
  433. * @arm: Indicates whether the host wants to arm this EQ.
  434. *
  435. * This routine will notify the HBA, by ringing the doorbell, that count
  436. * number of EQEs have been processed. The @arm parameter indicates whether
  437. * the queue should be rearmed when ringing the doorbell.
  438. **/
  439. void
  440. lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
  441. uint32_t count, bool arm)
  442. {
  443. struct lpfc_register doorbell;
  444. /* sanity check on queue memory */
  445. if (unlikely(!q || (count == 0 && !arm)))
  446. return;
  447. /* ring doorbell for number popped */
  448. doorbell.word0 = 0;
  449. if (arm) {
  450. bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
  451. bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
  452. }
  453. bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
  454. bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
  455. bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
  456. (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
  457. bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
  458. writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
  459. /* PCI read to flush PCI pipeline on re-arming for INTx mode */
  460. if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
  461. readl(q->phba->sli4_hba.EQDBregaddr);
  462. }
  463. /**
  464. * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
  465. * @phba: adapter with EQ
  466. * @q: The Event Queue that the host has completed processing for.
  467. * @count: Number of elements that have been consumed
  468. * @arm: Indicates whether the host wants to arm this EQ.
  469. *
  470. * This routine will notify the HBA, by ringing the doorbell, that count
  471. * number of EQEs have been processed. The @arm parameter indicates whether
  472. * the queue should be rearmed when ringing the doorbell.
  473. **/
  474. void
  475. lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
  476. uint32_t count, bool arm)
  477. {
  478. struct lpfc_register doorbell;
  479. /* sanity check on queue memory */
  480. if (unlikely(!q || (count == 0 && !arm)))
  481. return;
  482. /* ring doorbell for number popped */
  483. doorbell.word0 = 0;
  484. if (arm)
  485. bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
  486. bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
  487. bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
  488. writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
  489. /* PCI read to flush PCI pipeline on re-arming for INTx mode */
  490. if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
  491. readl(q->phba->sli4_hba.EQDBregaddr);
  492. }
  493. static void
  494. __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
  495. struct lpfc_eqe *eqe)
  496. {
  497. if (!phba->sli4_hba.pc_sli4_params.eqav)
  498. bf_set_le32(lpfc_eqe_valid, eqe, 0);
  499. eq->host_index = ((eq->host_index + 1) % eq->entry_count);
  500. /* if the index wrapped around, toggle the valid bit */
  501. if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
  502. eq->qe_valid = (eq->qe_valid) ? 0 : 1;
  503. }
  504. static void
  505. lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
  506. {
  507. struct lpfc_eqe *eqe = NULL;
  508. u32 eq_count = 0, cq_count = 0;
  509. struct lpfc_cqe *cqe = NULL;
  510. struct lpfc_queue *cq = NULL, *childq = NULL;
  511. int cqid = 0;
  512. /* walk all the EQ entries and drop on the floor */
  513. eqe = lpfc_sli4_eq_get(eq);
  514. while (eqe) {
  515. /* Get the reference to the corresponding CQ */
  516. cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
  517. cq = NULL;
  518. list_for_each_entry(childq, &eq->child_list, list) {
  519. if (childq->queue_id == cqid) {
  520. cq = childq;
  521. break;
  522. }
  523. }
  524. /* If CQ is valid, iterate through it and drop all the CQEs */
  525. if (cq) {
  526. cqe = lpfc_sli4_cq_get(cq);
  527. while (cqe) {
  528. __lpfc_sli4_consume_cqe(phba, cq, cqe);
  529. cq_count++;
  530. cqe = lpfc_sli4_cq_get(cq);
  531. }
  532. /* Clear and re-arm the CQ */
  533. phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
  534. LPFC_QUEUE_REARM);
  535. cq_count = 0;
  536. }
  537. __lpfc_sli4_consume_eqe(phba, eq, eqe);
  538. eq_count++;
  539. eqe = lpfc_sli4_eq_get(eq);
  540. }
  541. /* Clear and re-arm the EQ */
  542. phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
  543. }
  544. static int
  545. lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
  546. u8 rearm, enum lpfc_poll_mode poll_mode)
  547. {
  548. struct lpfc_eqe *eqe;
  549. int count = 0, consumed = 0;
  550. if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
  551. goto rearm_and_exit;
  552. eqe = lpfc_sli4_eq_get(eq);
  553. while (eqe) {
  554. lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
  555. __lpfc_sli4_consume_eqe(phba, eq, eqe);
  556. consumed++;
  557. if (!(++count % eq->max_proc_limit))
  558. break;
  559. if (!(count % eq->notify_interval)) {
  560. phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
  561. LPFC_QUEUE_NOARM);
  562. consumed = 0;
  563. }
  564. eqe = lpfc_sli4_eq_get(eq);
  565. }
  566. eq->EQ_processed += count;
  567. /* Track the max number of EQEs processed in 1 intr */
  568. if (count > eq->EQ_max_eqe)
  569. eq->EQ_max_eqe = count;
  570. xchg(&eq->queue_claimed, 0);
  571. rearm_and_exit:
  572. /* Always clear the EQ. */
  573. phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
  574. return count;
  575. }
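/*
 * Illustrative sketch (not part of the driver source): an interrupt-time
 * caller normally services an EQ by letting lpfc_sli4_process_eq() handle
 * and consume every pending EQE and then rearm the queue. The wrapper name
 * is hypothetical; LPFC_QUEUE_REARM is the rearm value used above.
 */
#if 0
static int example_service_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
			      enum lpfc_poll_mode poll_mode)
{
	/* Returns the number of EQEs that were processed */
	return lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_REARM, poll_mode);
}
#endif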
  576. /**
  577. * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
  578. * @q: The Completion Queue to get the first valid CQE from
  579. *
  580. * This routine will get the first valid Completion Queue Entry from @q, update
  581. * the queue's internal hba index, and return the CQE. If no valid CQEs are in
  582. * the Queue (no more work to do), or the Queue is full of CQEs that have been
  583. * processed, but not popped back to the HBA then this routine will return NULL.
  584. **/
  585. static struct lpfc_cqe *
  586. lpfc_sli4_cq_get(struct lpfc_queue *q)
  587. {
  588. struct lpfc_cqe *cqe;
  589. /* sanity check on queue memory */
  590. if (unlikely(!q))
  591. return NULL;
  592. cqe = lpfc_sli4_qe(q, q->host_index);
  593. /* If the next CQE is not valid then we are done */
  594. if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
  595. return NULL;
  596. /*
  597. * insert barrier for instruction interlock : data from the hardware
  598. * must have the valid bit checked before it can be copied and acted
  599. * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
  600. * instructions allowing action on content before valid bit checked,
  601. * add barrier here as well. May not be needed as "content" is a
  602. * single 32-bit entity here (vs multi word structure for cq's).
  603. */
  604. mb();
  605. return cqe;
  606. }
  607. static void
  608. __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  609. struct lpfc_cqe *cqe)
  610. {
  611. if (!phba->sli4_hba.pc_sli4_params.cqav)
  612. bf_set_le32(lpfc_cqe_valid, cqe, 0);
  613. cq->host_index = ((cq->host_index + 1) % cq->entry_count);
  614. /* if the index wrapped around, toggle the valid bit */
  615. if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
  616. cq->qe_valid = (cq->qe_valid) ? 0 : 1;
  617. }
  618. /**
  619. * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
  620. * @phba: the adapter with the CQ
  621. * @q: The Completion Queue that the host has completed processing for.
  622. * @count: the number of elements that were consumed
  623. * @arm: Indicates whether the host wants to arm this CQ.
  624. *
  625. * This routine will notify the HBA, by ringing the doorbell, that the
  626. * CQEs have been processed. The @arm parameter specifies whether the
  627. * queue should be rearmed when ringing the doorbell.
  628. **/
  629. void
  630. lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
  631. uint32_t count, bool arm)
  632. {
  633. struct lpfc_register doorbell;
  634. /* sanity check on queue memory */
  635. if (unlikely(!q || (count == 0 && !arm)))
  636. return;
  637. /* ring doorbell for number popped */
  638. doorbell.word0 = 0;
  639. if (arm)
  640. bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
  641. bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
  642. bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
  643. bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
  644. (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
  645. bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
  646. writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
  647. }
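/*
 * Illustrative sketch (not part of the driver source): CQEs are drained with
 * the same get/consume pairing used for EQEs above, and the doorbell is
 * written once for the whole batch. The per-entry handling is elided and the
 * helper name is hypothetical.
 */
#if 0
static void example_drain_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	struct lpfc_cqe *cqe;
	uint32_t consumed = 0;

	while ((cqe = lpfc_sli4_cq_get(cq))) {
		/* ... act on the CQE here ... */
		__lpfc_sli4_consume_cqe(phba, cq, cqe);
		consumed++;
	}
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, LPFC_QUEUE_REARM);
}
#endif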
  648. /**
  649. * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
  650. * @phba: the adapter with the CQ
  651. * @q: The Completion Queue that the host has completed processing for.
  652. * @count: the number of elements that were consumed
  653. * @arm: Indicates whether the host wants to arm this CQ.
  654. *
  655. * This routine will notify the HBA, by ringing the doorbell, that the
  656. * CQEs have been processed. The @arm parameter specifies whether the
  657. * queue should be rearmed when ringing the doorbell.
  658. **/
  659. void
  660. lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
  661. uint32_t count, bool arm)
  662. {
  663. struct lpfc_register doorbell;
  664. /* sanity check on queue memory */
  665. if (unlikely(!q || (count == 0 && !arm)))
  666. return;
  667. /* ring doorbell for number popped */
  668. doorbell.word0 = 0;
  669. if (arm)
  670. bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
  671. bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
  672. bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
  673. writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
  674. }
  675. /**
  676. * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
  677. *
  678. * This routine will copy the contents of @hrqe and @drqe to the next available
  679. * entries on @hq and @dq. This function will then ring the Receive Queue
  680. * Doorbell to signal the HBA to start processing the Receive Queue Entries.
  681. * This function returns the hq put index if successful. If no entries are
  682. * available on @hq then this function will return -EBUSY.
  683. * The caller is expected to hold the hbalock when calling this routine.
  684. **/
  685. int
  686. lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
  687. struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
  688. {
  689. struct lpfc_rqe *temp_hrqe;
  690. struct lpfc_rqe *temp_drqe;
  691. struct lpfc_register doorbell;
  692. int hq_put_index;
  693. int dq_put_index;
  694. /* sanity check on queue memory */
  695. if (unlikely(!hq) || unlikely(!dq))
  696. return -ENOMEM;
  697. hq_put_index = hq->host_index;
  698. dq_put_index = dq->host_index;
  699. temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
  700. temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
  701. if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
  702. return -EINVAL;
  703. if (hq_put_index != dq_put_index)
  704. return -EINVAL;
  705. /* If the host has not yet processed the next entry then we are done */
  706. if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
  707. return -EBUSY;
  708. lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
  709. lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
  710. /* Update the host index to point to the next slot */
  711. hq->host_index = ((hq_put_index + 1) % hq->entry_count);
  712. dq->host_index = ((dq_put_index + 1) % dq->entry_count);
  713. hq->RQ_buf_posted++;
  714. /* Ring The Header Receive Queue Doorbell */
  715. if (!(hq->host_index % hq->notify_interval)) {
  716. doorbell.word0 = 0;
  717. if (hq->db_format == LPFC_DB_RING_FORMAT) {
  718. bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
  719. hq->notify_interval);
  720. bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
  721. } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
  722. bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
  723. hq->notify_interval);
  724. bf_set(lpfc_rq_db_list_fm_index, &doorbell,
  725. hq->host_index);
  726. bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
  727. } else {
  728. return -EINVAL;
  729. }
  730. writel(doorbell.word0, hq->db_regaddr);
  731. }
  732. return hq_put_index;
  733. }
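/*
 * Illustrative sketch (not part of the driver source): receive buffers are
 * posted as header/data pairs, so a caller fills one RQE for each queue and
 * posts both with a single lpfc_sli4_rq_put() call. The putPaddrLow()/
 * putPaddrHigh() helpers and the rqe address fields are assumed from
 * elsewhere in the driver; the DMA addresses are hypothetical placeholders.
 */
#if 0
static int example_post_rq_pair(struct lpfc_queue *hrq, struct lpfc_queue *drq,
				dma_addr_t hdr_dma, dma_addr_t dat_dma)
{
	struct lpfc_rqe hrqe, drqe;

	hrqe.address_lo = putPaddrLow(hdr_dma);
	hrqe.address_hi = putPaddrHigh(hdr_dma);
	drqe.address_lo = putPaddrLow(dat_dma);
	drqe.address_hi = putPaddrHigh(dat_dma);

	/* Returns the hq put index, or a negative errno such as -EBUSY */
	return lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
}
#endif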
  734. /*
  735. * lpfc_sli4_rq_release - Updates internal hba index for RQ
  736. *
  737. * This routine will update the HBA index of a queue to reflect consumption of
  738. * one Receive Queue Entry by the HBA. When the HBA indicates that it has
  739. * consumed an entry the host calls this function to update the queue's
  740. * internal pointers. This routine returns the number of entries that were
  741. * consumed by the HBA.
  742. **/
  743. static uint32_t
  744. lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
  745. {
  746. /* sanity check on queue memory */
  747. if (unlikely(!hq) || unlikely(!dq))
  748. return 0;
  749. if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
  750. return 0;
  751. hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
  752. dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
  753. return 1;
  754. }
  755. /**
  756. * lpfc_cmd_iocb - Get next command iocb entry in the ring
  757. * @phba: Pointer to HBA context object.
  758. * @pring: Pointer to driver SLI ring object.
  759. *
  760. * This function returns pointer to next command iocb entry
  761. * in the command ring. The caller must hold hbalock to prevent
  762. * other threads from consuming the next command iocb.
  763. * SLI-2/SLI-3 provide different sized iocbs.
  764. **/
  765. static inline IOCB_t *
  766. lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  767. {
  768. return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
  769. pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
  770. }
  771. /**
  772. * lpfc_resp_iocb - Get next response iocb entry in the ring
  773. * @phba: Pointer to HBA context object.
  774. * @pring: Pointer to driver SLI ring object.
  775. *
  776. * This function returns pointer to next response iocb entry
  777. * in the response ring. The caller must hold hbalock to make sure
  778. * that no other thread consumes the next response iocb.
  779. * SLI-2/SLI-3 provide different sized iocbs.
  780. **/
  781. static inline IOCB_t *
  782. lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  783. {
  784. return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
  785. pring->sli.sli3.rspidx * phba->iocb_rsp_size);
  786. }
  787. /**
  788. * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
  789. * @phba: Pointer to HBA context object.
  790. *
  791. * This function is called with hbalock held. This function
  792. * allocates a new driver iocb object from the iocb pool. If the
  793. * allocation is successful, it returns pointer to the newly
  794. * allocated iocb object else it returns NULL.
  795. **/
  796. struct lpfc_iocbq *
  797. __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
  798. {
  799. struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
  800. struct lpfc_iocbq * iocbq = NULL;
  801. lockdep_assert_held(&phba->hbalock);
  802. list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
  803. if (iocbq)
  804. phba->iocb_cnt++;
  805. if (phba->iocb_cnt > phba->iocb_max)
  806. phba->iocb_max = phba->iocb_cnt;
  807. return iocbq;
  808. }
  809. /**
  810. * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
  811. * @phba: Pointer to HBA context object.
  812. * @xritag: XRI value.
  813. *
  814. * This function clears the sglq pointer from the array of active
  815. * sglq's. The xritag that is passed in is used to index into the
  816. * array. Before the xritag can be used it needs to be adjusted
  817. * by subtracting the xribase.
  818. *
  819. * Returns sglq pointer = success, NULL = Failure.
  820. **/
  821. struct lpfc_sglq *
  822. __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
  823. {
  824. struct lpfc_sglq *sglq;
  825. sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
  826. phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
  827. return sglq;
  828. }
  829. /**
  830. * __lpfc_get_active_sglq - Get the active sglq for this XRI.
  831. * @phba: Pointer to HBA context object.
  832. * @xritag: XRI value.
  833. *
  834. * This function returns the sglq pointer from the array of active
  835. * sglq's. The xritag that is passed in is used to index into the
  836. * array. Before the xritag can be used it needs to be adjusted
  837. * by subtracting the xribase.
  838. *
  839. * Returns sglq pointer = success, NULL = Failure.
  840. **/
  841. struct lpfc_sglq *
  842. __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
  843. {
  844. struct lpfc_sglq *sglq;
  845. sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
  846. return sglq;
  847. }
  848. /**
  849. * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
  850. * @phba: Pointer to HBA context object.
  851. * @xritag: xri used in this exchange.
  852. * @rrq: The RRQ to be cleared.
  853. *
  854. **/
  855. void
  856. lpfc_clr_rrq_active(struct lpfc_hba *phba,
  857. uint16_t xritag,
  858. struct lpfc_node_rrq *rrq)
  859. {
  860. struct lpfc_nodelist *ndlp = NULL;
  861. /* Lookup did to verify if did is still active on this vport */
  862. if (rrq->vport)
  863. ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
  864. if (!ndlp)
  865. goto out;
  866. if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
  867. rrq->send_rrq = 0;
  868. rrq->xritag = 0;
  869. rrq->rrq_stop_time = 0;
  870. }
  871. out:
  872. mempool_free(rrq, phba->rrq_pool);
  873. }
  874. /**
  875. * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
  876. * @phba: Pointer to HBA context object.
  877. *
  878. * This function is called with hbalock held. This function
  879. * checks if stop_time (ratov from setting rrq active) has
  880. * been reached; if it has and the send_rrq flag is set, then
  881. * it will call lpfc_send_rrq. If the send_rrq flag is not set
  882. * then it will just call the routine to clear the rrq and
  883. * free the rrq resource.
  884. * The timer is set to the next rrq that is going to expire before
  885. * leaving the routine.
  886. *
  887. **/
  888. void
  889. lpfc_handle_rrq_active(struct lpfc_hba *phba)
  890. {
  891. struct lpfc_node_rrq *rrq;
  892. struct lpfc_node_rrq *nextrrq;
  893. unsigned long next_time;
  894. unsigned long iflags;
  895. LIST_HEAD(send_rrq);
  896. clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
  897. next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
  898. spin_lock_irqsave(&phba->rrq_list_lock, iflags);
  899. list_for_each_entry_safe(rrq, nextrrq,
  900. &phba->active_rrq_list, list) {
  901. if (time_after(jiffies, rrq->rrq_stop_time))
  902. list_move(&rrq->list, &send_rrq);
  903. else if (time_before(rrq->rrq_stop_time, next_time))
  904. next_time = rrq->rrq_stop_time;
  905. }
  906. spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
  907. if ((!list_empty(&phba->active_rrq_list)) &&
  908. (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
  909. mod_timer(&phba->rrq_tmr, next_time);
  910. list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
  911. list_del(&rrq->list);
  912. if (!rrq->send_rrq) {
  913. /* this call will free the rrq */
  914. lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
  915. } else if (lpfc_send_rrq(phba, rrq)) {
  916. /* if we send the rrq then the completion handler
  917. * will clear the bit in the xribitmap.
  918. */
  919. lpfc_clr_rrq_active(phba, rrq->xritag,
  920. rrq);
  921. }
  922. }
  923. }
  924. /**
  925. * lpfc_get_active_rrq - Get the active RRQ for this exchange.
  926. * @vport: Pointer to vport context object.
  927. * @xri: The xri used in the exchange.
  928. * @did: The target's DID for this exchange.
  929. *
  930. * returns NULL = rrq not found in the phba->active_rrq_list.
  931. * rrq = rrq for this xri and target.
  932. **/
  933. struct lpfc_node_rrq *
  934. lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
  935. {
  936. struct lpfc_hba *phba = vport->phba;
  937. struct lpfc_node_rrq *rrq;
  938. struct lpfc_node_rrq *nextrrq;
  939. unsigned long iflags;
  940. if (phba->sli_rev != LPFC_SLI_REV4)
  941. return NULL;
  942. spin_lock_irqsave(&phba->rrq_list_lock, iflags);
  943. list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
  944. if (rrq->vport == vport && rrq->xritag == xri &&
  945. rrq->nlp_DID == did){
  946. list_del(&rrq->list);
  947. spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
  948. return rrq;
  949. }
  950. }
  951. spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
  952. return NULL;
  953. }
  954. /**
  955. * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
  956. * @vport: Pointer to vport context object.
  957. * @ndlp: Pointer to the lpfc_nodelist structure.
  958. * If @ndlp is NULL, remove all active RRQs for this vport from the
  959. * phba->active_rrq_list and clear the rrq.
  960. * If @ndlp is not NULL, only remove the RRQs for this vport and this @ndlp.
  961. **/
  962. void
  963. lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  964. {
  965. struct lpfc_hba *phba = vport->phba;
  966. struct lpfc_node_rrq *rrq;
  967. struct lpfc_node_rrq *nextrrq;
  968. unsigned long iflags;
  969. LIST_HEAD(rrq_list);
  970. if (phba->sli_rev != LPFC_SLI_REV4)
  971. return;
  972. if (!ndlp) {
  973. lpfc_sli4_vport_delete_els_xri_aborted(vport);
  974. lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
  975. }
  976. spin_lock_irqsave(&phba->rrq_list_lock, iflags);
  977. list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
  978. if (rrq->vport != vport)
  979. continue;
  980. if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
  981. list_move(&rrq->list, &rrq_list);
  982. }
  983. spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
  984. list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
  985. list_del(&rrq->list);
  986. lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
  987. }
  988. }
  989. /**
  990. * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
  991. * @phba: Pointer to HBA context object.
  992. * @ndlp: Target's nodelist pointer for this exchange.
  993. * @xritag: the xri in the bitmap to test.
  994. *
  995. * This function returns:
  996. * 0 = rrq not active for this xri
  997. * 1 = rrq is valid for this xri.
  998. **/
  999. int
  1000. lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
  1001. uint16_t xritag)
  1002. {
  1003. if (!ndlp)
  1004. return 0;
  1005. if (!ndlp->active_rrqs_xri_bitmap)
  1006. return 0;
  1007. if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
  1008. return 1;
  1009. else
  1010. return 0;
  1011. }
  1012. /**
  1013. * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
  1014. * @phba: Pointer to HBA context object.
  1015. * @ndlp: nodelist pointer for this target.
  1016. * @xritag: xri used in this exchange.
  1017. * @rxid: Remote Exchange ID.
  1018. * @send_rrq: Flag used to determine if we should send rrq els cmd.
  1019. *
  1020. * This function takes the hbalock.
  1021. * The active bit is always set in the active rrq xri_bitmap even
  1022. * if there is no slot available for the other rrq information.
  1023. *
  1024. * Returns 0 if the rrq was activated for this xri,
  1025. * < 0 if there is no memory or the ndlp is invalid.
  1026. **/
  1027. int
  1028. lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
  1029. uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
  1030. {
  1031. unsigned long iflags;
  1032. struct lpfc_node_rrq *rrq;
  1033. int empty;
  1034. if (!ndlp)
  1035. return -EINVAL;
  1036. if (!phba->cfg_enable_rrq)
  1037. return -EINVAL;
  1038. if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
  1039. clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
  1040. goto outnl;
  1041. }
  1042. spin_lock_irqsave(&phba->hbalock, iflags);
  1043. if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
  1044. goto out;
  1045. if (!ndlp->active_rrqs_xri_bitmap)
  1046. goto out;
  1047. if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
  1048. goto out;
  1049. spin_unlock_irqrestore(&phba->hbalock, iflags);
  1050. rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
  1051. if (!rrq) {
  1052. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  1053. "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
  1054. " DID:0x%x Send:%d\n",
  1055. xritag, rxid, ndlp->nlp_DID, send_rrq);
  1056. return -EINVAL;
  1057. }
  1058. if (phba->cfg_enable_rrq == 1)
  1059. rrq->send_rrq = send_rrq;
  1060. else
  1061. rrq->send_rrq = 0;
  1062. rrq->xritag = xritag;
  1063. rrq->rrq_stop_time = jiffies +
  1064. msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
  1065. rrq->nlp_DID = ndlp->nlp_DID;
  1066. rrq->vport = ndlp->vport;
  1067. rrq->rxid = rxid;
  1068. spin_lock_irqsave(&phba->rrq_list_lock, iflags);
  1069. empty = list_empty(&phba->active_rrq_list);
  1070. list_add_tail(&rrq->list, &phba->active_rrq_list);
  1071. spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
  1072. set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
  1073. if (empty)
  1074. lpfc_worker_wake_up(phba);
  1075. return 0;
  1076. out:
  1077. spin_unlock_irqrestore(&phba->hbalock, iflags);
  1078. outnl:
  1079. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  1080. "2921 Can't set rrq active xri:0x%x rxid:0x%x"
  1081. " DID:0x%x Send:%d\n",
  1082. xritag, rxid, ndlp->nlp_DID, send_rrq);
  1083. return -EINVAL;
  1084. }
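/*
 * Illustrative sketch (not part of the driver source): a caller can test the
 * RRQ bitmap before reusing an XRI for a node and, after an abort, mark the
 * XRI as having an RRQ outstanding. The helper name and the send_rrq value
 * of 1 are hypothetical.
 */
#if 0
static void example_rrq_usage(struct lpfc_hba *phba,
			      struct lpfc_nodelist *ndlp,
			      uint16_t xritag, uint16_t rxid)
{
	if (!lpfc_test_rrq_active(phba, ndlp, xritag))
		lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1);
}
#endif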
  1085. /**
  1086. * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
  1087. * @phba: Pointer to HBA context object.
  1088. * @piocbq: Pointer to the iocbq.
  1089. *
  1090. * The driver calls this function with either the nvme ls ring lock
  1091. * or the fc els ring lock held depending on the iocb usage. This function
  1092. * gets a new driver sglq object from the sglq list. If the list is not
  1093. * empty, the allocation succeeds and this function returns a pointer to the
  1094. * newly allocated sglq object; otherwise it returns NULL.
  1095. **/
  1096. static struct lpfc_sglq *
  1097. __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
  1098. {
  1099. struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
  1100. struct lpfc_sglq *sglq = NULL;
  1101. struct lpfc_sglq *start_sglq = NULL;
  1102. struct lpfc_io_buf *lpfc_cmd;
  1103. struct lpfc_nodelist *ndlp;
  1104. int found = 0;
  1105. u8 cmnd;
  1106. cmnd = get_job_cmnd(phba, piocbq);
  1107. if (piocbq->cmd_flag & LPFC_IO_FCP) {
  1108. lpfc_cmd = piocbq->io_buf;
  1109. ndlp = lpfc_cmd->rdata->pnode;
  1110. } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
  1111. !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
  1112. ndlp = piocbq->ndlp;
  1113. } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
  1114. if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
  1115. ndlp = NULL;
  1116. else
  1117. ndlp = piocbq->ndlp;
  1118. } else {
  1119. ndlp = piocbq->ndlp;
  1120. }
  1121. spin_lock(&phba->sli4_hba.sgl_list_lock);
  1122. list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
  1123. start_sglq = sglq;
  1124. while (!found) {
  1125. if (!sglq)
  1126. break;
  1127. if (ndlp && ndlp->active_rrqs_xri_bitmap &&
  1128. test_bit(sglq->sli4_lxritag,
  1129. ndlp->active_rrqs_xri_bitmap)) {
  1130. /* This xri has an rrq outstanding for this DID.
  1131. * put it back in the list and get another xri.
  1132. */
  1133. list_add_tail(&sglq->list, lpfc_els_sgl_list);
  1134. sglq = NULL;
  1135. list_remove_head(lpfc_els_sgl_list, sglq,
  1136. struct lpfc_sglq, list);
  1137. if (sglq == start_sglq) {
  1138. list_add_tail(&sglq->list, lpfc_els_sgl_list);
  1139. sglq = NULL;
  1140. break;
  1141. } else
  1142. continue;
  1143. }
  1144. sglq->ndlp = ndlp;
  1145. found = 1;
  1146. phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
  1147. sglq->state = SGL_ALLOCATED;
  1148. }
  1149. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  1150. return sglq;
  1151. }
  1152. /**
  1153. * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
  1154. * @phba: Pointer to HBA context object.
  1155. * @piocbq: Pointer to the iocbq.
  1156. *
  1157. * This function is called with the sgl_list lock held. This function
  1158. * gets a new driver sglq object from the sglq list. If the list is not
  1159. * empty, the allocation succeeds and this function returns a pointer to the
  1160. * newly allocated sglq object; otherwise it returns NULL.
  1161. **/
  1162. struct lpfc_sglq *
  1163. __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
  1164. {
  1165. struct list_head *lpfc_nvmet_sgl_list;
  1166. struct lpfc_sglq *sglq = NULL;
  1167. lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
  1168. lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
  1169. list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
  1170. if (!sglq)
  1171. return NULL;
  1172. phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
  1173. sglq->state = SGL_ALLOCATED;
  1174. return sglq;
  1175. }
  1176. /**
  1177. * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
  1178. * @phba: Pointer to HBA context object.
  1179. *
  1180. * This function is called with no lock held. This function
  1181. * allocates a new driver iocb object from the iocb pool. If the
  1182. * allocation is successful, it returns pointer to the newly
  1183. * allocated iocb object else it returns NULL.
  1184. **/
  1185. struct lpfc_iocbq *
  1186. lpfc_sli_get_iocbq(struct lpfc_hba *phba)
  1187. {
  1188. struct lpfc_iocbq * iocbq = NULL;
  1189. unsigned long iflags;
  1190. spin_lock_irqsave(&phba->hbalock, iflags);
  1191. iocbq = __lpfc_sli_get_iocbq(phba);
  1192. spin_unlock_irqrestore(&phba->hbalock, iflags);
  1193. return iocbq;
  1194. }
  1195. /**
  1196. * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
  1197. * @phba: Pointer to HBA context object.
  1198. * @iocbq: Pointer to driver iocb object.
  1199. *
  1200. * This function is called to release the driver iocb object
  1201. * to the iocb pool. The iotag in the iocb object
  1202. * does not change for each use of the iocb object. This function
  1203. * clears all other fields of the iocb object when it is freed.
  1204. * The sqlq structure that holds the xritag and phys and virtual
  1205. * mappings for the scatter gather list is retrieved from the
  1206. * active array of sglq. The get of the sglq pointer also clears
  1207. * the entry in the array. If the status of the IO indicates that
  1208. * this IO was aborted then the sglq entry is put on the
  1209. * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
  1210. * IO has good status or fails for any other reason then the sglq
  1211. * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
  1212. * asserted held in the code path calling this routine.
  1213. **/
  1214. static void
  1215. __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
  1216. {
  1217. struct lpfc_sglq *sglq;
  1218. unsigned long iflag = 0;
  1219. struct lpfc_sli_ring *pring;
  1220. if (iocbq->sli4_xritag == NO_XRI)
  1221. sglq = NULL;
  1222. else
  1223. sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
  1224. if (sglq) {
  1225. if (iocbq->cmd_flag & LPFC_IO_NVMET) {
  1226. spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
  1227. iflag);
  1228. sglq->state = SGL_FREED;
  1229. sglq->ndlp = NULL;
  1230. list_add_tail(&sglq->list,
  1231. &phba->sli4_hba.lpfc_nvmet_sgl_list);
  1232. spin_unlock_irqrestore(
  1233. &phba->sli4_hba.sgl_list_lock, iflag);
  1234. goto out;
  1235. }
  1236. if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
  1237. (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
  1238. sglq->state != SGL_XRI_ABORTED) {
  1239. spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
  1240. iflag);
  1241. /* Check if we can get a reference on ndlp */
  1242. if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
  1243. sglq->ndlp = NULL;
  1244. list_add(&sglq->list,
  1245. &phba->sli4_hba.lpfc_abts_els_sgl_list);
  1246. spin_unlock_irqrestore(
  1247. &phba->sli4_hba.sgl_list_lock, iflag);
  1248. } else {
  1249. spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
  1250. iflag);
  1251. sglq->state = SGL_FREED;
  1252. sglq->ndlp = NULL;
  1253. list_add_tail(&sglq->list,
  1254. &phba->sli4_hba.lpfc_els_sgl_list);
  1255. spin_unlock_irqrestore(
  1256. &phba->sli4_hba.sgl_list_lock, iflag);
  1257. pring = lpfc_phba_elsring(phba);
  1258. /* Check if TXQ queue needs to be serviced */
  1259. if (pring && (!list_empty(&pring->txq)))
  1260. lpfc_worker_wake_up(phba);
  1261. }
  1262. }
  1263. out:
  1264. /*
  1265. * Clean all volatile data fields, preserve iotag and node struct.
  1266. */
  1267. memset_startat(iocbq, 0, wqe);
  1268. iocbq->sli4_lxritag = NO_XRI;
  1269. iocbq->sli4_xritag = NO_XRI;
  1270. iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
  1271. LPFC_IO_NVME_LS);
  1272. list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
  1273. }
  1274. /**
  1275. * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
  1276. * @phba: Pointer to HBA context object.
  1277. * @iocbq: Pointer to driver iocb object.
  1278. *
  1279. * This function is called to release the driver iocb object to the
  1280. * iocb pool. The iotag in the iocb object does not change for each
  1281. * use of the iocb object. This function clears all other fields of
  1282. * the iocb object when it is freed. The hbalock is asserted held in
  1283. * the code path calling this routine.
  1284. **/
  1285. static void
  1286. __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
  1287. {
  1288. /*
  1289. * Clean all volatile data fields, preserve iotag and node struct.
  1290. */
  1291. memset_startat(iocbq, 0, iocb);
  1292. iocbq->sli4_xritag = NO_XRI;
  1293. list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
  1294. }
  1295. /**
  1296. * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
  1297. * @phba: Pointer to HBA context object.
  1298. * @iocbq: Pointer to driver iocb object.
  1299. *
  1300. * This function is called with hbalock held to release driver
  1301. * iocb object to the iocb pool. The iotag in the iocb object
  1302. * does not change for each use of the iocb object. This function
  1303. * clears all other fields of the iocb object when it is freed.
  1304. **/
  1305. static void
  1306. __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
  1307. {
  1308. lockdep_assert_held(&phba->hbalock);
  1309. phba->__lpfc_sli_release_iocbq(phba, iocbq);
  1310. phba->iocb_cnt--;
  1311. }
  1312. /**
  1313. * lpfc_sli_release_iocbq - Release iocb to the iocb pool
  1314. * @phba: Pointer to HBA context object.
  1315. * @iocbq: Pointer to driver iocb object.
  1316. *
  1317. * This function is called with no lock held to release the iocb to
  1318. * iocb pool.
  1319. **/
  1320. void
  1321. lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
  1322. {
  1323. unsigned long iflags;
  1324. /*
  1325. * Clean all volatile data fields, preserve iotag and node struct.
  1326. */
  1327. spin_lock_irqsave(&phba->hbalock, iflags);
  1328. __lpfc_sli_release_iocbq(phba, iocbq);
  1329. spin_unlock_irqrestore(&phba->hbalock, iflags);
  1330. }
  1331. /**
  1332. * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
  1333. * @phba: Pointer to HBA context object.
  1334. * @iocblist: List of IOCBs.
  1335. * @ulpstatus: ULP status in IOCB command field.
  1336. * @ulpWord4: ULP word-4 in IOCB command field.
  1337. *
  1338. * This function is called with a list of IOCBs to cancel. It cancels the IOCB
  1339. * on the list by invoking the complete callback function associated with the
  1340. * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
  1341. * fields.
  1342. **/
  1343. void
  1344. lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
  1345. uint32_t ulpstatus, uint32_t ulpWord4)
  1346. {
  1347. struct lpfc_iocbq *piocb;
  1348. while (!list_empty(iocblist)) {
  1349. list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
  1350. if (piocb->cmd_cmpl) {
  1351. if (piocb->cmd_flag & LPFC_IO_NVME) {
  1352. lpfc_nvme_cancel_iocb(phba, piocb,
  1353. ulpstatus, ulpWord4);
  1354. } else {
  1355. if (phba->sli_rev == LPFC_SLI_REV4) {
  1356. bf_set(lpfc_wcqe_c_status,
  1357. &piocb->wcqe_cmpl, ulpstatus);
  1358. piocb->wcqe_cmpl.parameter = ulpWord4;
  1359. } else {
  1360. piocb->iocb.ulpStatus = ulpstatus;
  1361. piocb->iocb.un.ulpWord[4] = ulpWord4;
  1362. }
  1363. (piocb->cmd_cmpl) (phba, piocb, piocb);
  1364. }
  1365. } else {
  1366. lpfc_sli_release_iocbq(phba, piocb);
  1367. }
  1368. }
  1369. return;
  1370. }
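/*
 * Illustrative sketch (not part of the driver source): callers typically
 * move outstanding iocbs onto a local list under the hbalock and then flush
 * them with one lpfc_sli_cancel_iocbs() call. The IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_DOWN status pair is an assumption based on how link-down flushes
 * are commonly reported; the helper name is hypothetical.
 */
#if 0
static void example_flush_txq(struct lpfc_hba *phba,
			      struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	list_splice_init(&pring->txq, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
}
#endif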
  1371. /**
  1372. * lpfc_sli_iocb_cmd_type - Get the iocb type
  1373. * @iocb_cmnd: iocb command code.
  1374. *
  1375. * This function is called by ring event handler function to get the iocb type.
  1376. * This function translates the iocb command to an iocb command type used to
  1377. * decide the final disposition of each completed IOCB.
  1378. * The function returns
  1379. * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
  1380. * LPFC_SOL_IOCB if it is a solicited iocb completion
  1381. * LPFC_ABORT_IOCB if it is an abort iocb
  1382. * LPFC_UNSOL_IOCB if it is an unsolicited iocb
  1383. *
  1384. * The caller is not required to hold any lock.
  1385. **/
  1386. static lpfc_iocb_type
  1387. lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
  1388. {
  1389. lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
  1390. if (iocb_cmnd > CMD_MAX_IOCB_CMD)
  1391. return type;
  1392. switch (iocb_cmnd) {
  1393. case CMD_XMIT_SEQUENCE_CR:
  1394. case CMD_XMIT_SEQUENCE_CX:
  1395. case CMD_XMIT_BCAST_CN:
  1396. case CMD_XMIT_BCAST_CX:
  1397. case CMD_ELS_REQUEST_CR:
  1398. case CMD_ELS_REQUEST_CX:
  1399. case CMD_CREATE_XRI_CR:
  1400. case CMD_CREATE_XRI_CX:
  1401. case CMD_GET_RPI_CN:
  1402. case CMD_XMIT_ELS_RSP_CX:
  1403. case CMD_GET_RPI_CR:
  1404. case CMD_FCP_IWRITE_CR:
  1405. case CMD_FCP_IWRITE_CX:
  1406. case CMD_FCP_IREAD_CR:
  1407. case CMD_FCP_IREAD_CX:
  1408. case CMD_FCP_ICMND_CR:
  1409. case CMD_FCP_ICMND_CX:
  1410. case CMD_FCP_TSEND_CX:
  1411. case CMD_FCP_TRSP_CX:
  1412. case CMD_FCP_TRECEIVE_CX:
  1413. case CMD_FCP_AUTO_TRSP_CX:
  1414. case CMD_ADAPTER_MSG:
  1415. case CMD_ADAPTER_DUMP:
  1416. case CMD_XMIT_SEQUENCE64_CR:
  1417. case CMD_XMIT_SEQUENCE64_CX:
  1418. case CMD_XMIT_BCAST64_CN:
  1419. case CMD_XMIT_BCAST64_CX:
  1420. case CMD_ELS_REQUEST64_CR:
  1421. case CMD_ELS_REQUEST64_CX:
  1422. case CMD_FCP_IWRITE64_CR:
  1423. case CMD_FCP_IWRITE64_CX:
  1424. case CMD_FCP_IREAD64_CR:
  1425. case CMD_FCP_IREAD64_CX:
  1426. case CMD_FCP_ICMND64_CR:
  1427. case CMD_FCP_ICMND64_CX:
  1428. case CMD_FCP_TSEND64_CX:
  1429. case CMD_FCP_TRSP64_CX:
  1430. case CMD_FCP_TRECEIVE64_CX:
  1431. case CMD_GEN_REQUEST64_CR:
  1432. case CMD_GEN_REQUEST64_CX:
  1433. case CMD_XMIT_ELS_RSP64_CX:
  1434. case DSSCMD_IWRITE64_CR:
  1435. case DSSCMD_IWRITE64_CX:
  1436. case DSSCMD_IREAD64_CR:
  1437. case DSSCMD_IREAD64_CX:
  1438. case CMD_SEND_FRAME:
  1439. type = LPFC_SOL_IOCB;
  1440. break;
  1441. case CMD_ABORT_XRI_CN:
  1442. case CMD_ABORT_XRI_CX:
  1443. case CMD_CLOSE_XRI_CN:
  1444. case CMD_CLOSE_XRI_CX:
  1445. case CMD_XRI_ABORTED_CX:
  1446. case CMD_ABORT_MXRI64_CN:
  1447. case CMD_XMIT_BLS_RSP64_CX:
  1448. type = LPFC_ABORT_IOCB;
  1449. break;
  1450. case CMD_RCV_SEQUENCE_CX:
  1451. case CMD_RCV_ELS_REQ_CX:
  1452. case CMD_RCV_SEQUENCE64_CX:
  1453. case CMD_RCV_ELS_REQ64_CX:
  1454. case CMD_ASYNC_STATUS:
  1455. case CMD_IOCB_RCV_SEQ64_CX:
  1456. case CMD_IOCB_RCV_ELS64_CX:
  1457. case CMD_IOCB_RCV_CONT64_CX:
  1458. case CMD_IOCB_RET_XRI64_CX:
  1459. type = LPFC_UNSOL_IOCB;
  1460. break;
  1461. case CMD_IOCB_XMIT_MSEQ64_CR:
  1462. case CMD_IOCB_XMIT_MSEQ64_CX:
  1463. case CMD_IOCB_RCV_SEQ_LIST64_CX:
  1464. case CMD_IOCB_RCV_ELS_LIST64_CX:
  1465. case CMD_IOCB_CLOSE_EXTENDED_CN:
  1466. case CMD_IOCB_ABORT_EXTENDED_CN:
  1467. case CMD_IOCB_RET_HBQE64_CN:
  1468. case CMD_IOCB_FCP_IBIDIR64_CR:
  1469. case CMD_IOCB_FCP_IBIDIR64_CX:
  1470. case CMD_IOCB_FCP_ITASKMGT64_CX:
  1471. case CMD_IOCB_LOGENTRY_CN:
  1472. case CMD_IOCB_LOGENTRY_ASYNC_CN:
  1473. printk("%s - Unhandled SLI-3 Command x%x\n",
  1474. __func__, iocb_cmnd);
  1475. type = LPFC_UNKNOWN_IOCB;
  1476. break;
  1477. default:
  1478. type = LPFC_UNKNOWN_IOCB;
  1479. break;
  1480. }
  1481. return type;
  1482. }
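/*
 * Illustrative sketch (not part of the driver source): the ring event
 * handler dispatches on the returned iocb type; a reduced, hypothetical
 * version of that dispatch is shown below.
 */
#if 0
static void example_dispatch_iocb(uint8_t ulpCommand)
{
	switch (lpfc_sli_iocb_cmd_type(ulpCommand)) {
	case LPFC_SOL_IOCB:
		/* match against the txcmplq and run the completion handler */
		break;
	case LPFC_UNSOL_IOCB:
		/* hand the sequence to the registered unsolicited handler */
		break;
	case LPFC_ABORT_IOCB:
		/* complete the aborted command if it is still outstanding */
		break;
	case LPFC_UNKNOWN_IOCB:
	default:
		/* log and drop the entry */
		break;
	}
}
#endif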
  1483. /**
  1484. * lpfc_sli_ring_map - Issue config_ring mbox for all rings
  1485. * @phba: Pointer to HBA context object.
  1486. *
  1487. * This function is called from SLI initialization code
  1488. * to configure every ring of the HBA's SLI interface. The
  1489. * caller is not required to hold any lock. This function issues
  1490. * a config_ring mailbox command for each ring.
  1491. * This function returns zero if successful else returns a negative
  1492. * error code.
  1493. **/
  1494. static int
  1495. lpfc_sli_ring_map(struct lpfc_hba *phba)
  1496. {
  1497. struct lpfc_sli *psli = &phba->sli;
  1498. LPFC_MBOXQ_t *pmb;
  1499. MAILBOX_t *pmbox;
  1500. int i, rc, ret = 0;
  1501. pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1502. if (!pmb)
  1503. return -ENOMEM;
  1504. pmbox = &pmb->u.mb;
  1505. phba->link_state = LPFC_INIT_MBX_CMDS;
  1506. for (i = 0; i < psli->num_rings; i++) {
  1507. lpfc_config_ring(phba, i, pmb);
  1508. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  1509. if (rc != MBX_SUCCESS) {
  1510. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  1511. "0446 Adapter failed to init (%d), "
  1512. "mbxCmd x%x CFG_RING, mbxStatus x%x, "
  1513. "ring %d\n",
  1514. rc, pmbox->mbxCommand,
  1515. pmbox->mbxStatus, i);
  1516. phba->link_state = LPFC_HBA_ERROR;
  1517. ret = -ENXIO;
  1518. break;
  1519. }
  1520. }
  1521. mempool_free(pmb, phba->mbox_mem_pool);
  1522. return ret;
  1523. }
  1524. /**
  1525. * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
  1526. * @phba: Pointer to HBA context object.
  1527. * @pring: Pointer to driver SLI ring object.
  1528. * @piocb: Pointer to the driver iocb object.
  1529. *
  1530. * The driver calls this function with the hbalock held for SLI3 ports or
  1531. * the ring lock held for SLI4 ports. The function adds the
  1532. * new iocb to txcmplq of the given ring. This function always returns
  1533. * 0. If this function is called for ELS ring, this function checks if
  1534. * there is a vport associated with the ELS command. This function also
  1535. * starts els_tmofunc timer if this is an ELS command.
  1536. **/
  1537. static int
  1538. lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  1539. struct lpfc_iocbq *piocb)
  1540. {
  1541. u32 ulp_command = 0;
  1542. BUG_ON(!piocb);
  1543. ulp_command = get_job_cmnd(phba, piocb);
  1544. list_add_tail(&piocb->list, &pring->txcmplq);
  1545. piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
  1546. pring->txcmplq_cnt++;
  1547. if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
  1548. (ulp_command != CMD_ABORT_XRI_WQE) &&
  1549. (ulp_command != CMD_ABORT_XRI_CN) &&
  1550. (ulp_command != CMD_CLOSE_XRI_CN)) {
  1551. BUG_ON(!piocb->vport);
  1552. if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
  1553. mod_timer(&piocb->vport->els_tmofunc,
  1554. jiffies +
  1555. msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
  1556. }
  1557. return 0;
  1558. }
  1559. /**
  1560. * lpfc_sli_ringtx_get - Get first element of the txq
  1561. * @phba: Pointer to HBA context object.
  1562. * @pring: Pointer to driver SLI ring object.
  1563. *
  1564. * This function is called with hbalock held to get next
  1565. * iocb in txq of the given ring. If there is any iocb in
  1566. * the txq, the function returns first iocb in the list after
  1567. * removing the iocb from the list, else it returns NULL.
  1568. **/
  1569. struct lpfc_iocbq *
  1570. lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  1571. {
  1572. struct lpfc_iocbq *cmd_iocb;
  1573. lockdep_assert_held(&phba->hbalock);
  1574. list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
  1575. return cmd_iocb;
  1576. }
  1577. /**
  1578. * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
  1579. * @phba: Pointer to HBA context object.
  1580. * @cmdiocb: Pointer to driver command iocb object.
  1581. * @rspiocb: Pointer to driver response iocb object.
  1582. *
  1583. * This routine will inform the driver of any BW adjustments we need
  1584. * to make. These changes will be picked up during the next CMF
  1585. * timer interrupt. In addition, any BW changes will be logged
  1586. * with LOG_CGN_MGMT.
  1587. **/
  1588. static void
  1589. lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  1590. struct lpfc_iocbq *rspiocb)
  1591. {
  1592. union lpfc_wqe128 *wqe;
  1593. uint32_t status, info;
  1594. struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
  1595. uint64_t bw, bwdif, slop;
  1596. uint64_t pcent, bwpcent;
  1597. int asig, afpin, sigcnt, fpincnt;
  1598. int wsigmax, wfpinmax, cg, tdp;
  1599. char *s;
  1600. /* First check for error */
  1601. status = bf_get(lpfc_wcqe_c_status, wcqe);
  1602. if (status) {
  1603. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1604. "6211 CMF_SYNC_WQE Error "
  1605. "req_tag x%x status x%x hwstatus x%x "
  1606. "tdatap x%x parm x%x\n",
  1607. bf_get(lpfc_wcqe_c_request_tag, wcqe),
  1608. bf_get(lpfc_wcqe_c_status, wcqe),
  1609. bf_get(lpfc_wcqe_c_hw_status, wcqe),
  1610. wcqe->total_data_placed,
  1611. wcqe->parameter);
  1612. goto out;
  1613. }
  1614. /* Gather congestion information on a successful cmpl */
  1615. info = wcqe->parameter;
  1616. phba->cmf_active_info = info;
  1617. /* See if firmware info count is valid or has changed */
  1618. if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
  1619. info = 0;
  1620. else
  1621. phba->cmf_info_per_interval = info;
  1622. tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
  1623. cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
  1624. /* Get BW requirement from firmware */
  1625. bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
  1626. if (!bw) {
  1627. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1628. "6212 CMF_SYNC_WQE x%x: NULL bw\n",
  1629. bf_get(lpfc_wcqe_c_request_tag, wcqe));
  1630. goto out;
  1631. }
  1632. /* Gather information needed for logging if a BW change is required */
  1633. wqe = &cmdiocb->wqe;
  1634. asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
  1635. afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
  1636. fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
  1637. sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
  1638. if (phba->cmf_max_bytes_per_interval != bw ||
  1639. (asig || afpin || sigcnt || fpincnt)) {
  1640. /* Are we increasing or decreasing BW */
  1641. if (phba->cmf_max_bytes_per_interval < bw) {
  1642. bwdif = bw - phba->cmf_max_bytes_per_interval;
  1643. s = "Increase";
  1644. } else {
  1645. bwdif = phba->cmf_max_bytes_per_interval - bw;
  1646. s = "Decrease";
  1647. }
  1648. /* What is the change percentage */
  1649. slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
  1650. pcent = div64_u64(bwdif * 100 + slop,
  1651. phba->cmf_link_byte_count);
  1652. bwpcent = div64_u64(bw * 100 + slop,
  1653. phba->cmf_link_byte_count);
  1654. /* Because of bytes adjustment due to shorter timer in
  1655. * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and
  1656. * may seem like BW is above 100%.
  1657. */
  1658. if (bwpcent > 100)
  1659. bwpcent = 100;
  1660. if (phba->cmf_max_bytes_per_interval < bw &&
  1661. bwpcent > 95)
  1662. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1663. "6208 Congestion bandwidth "
  1664. "limits removed\n");
  1665. else if ((phba->cmf_max_bytes_per_interval > bw) &&
  1666. ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95))
  1667. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1668. "6209 Congestion bandwidth "
  1669. "limits in effect\n");
  1670. if (asig) {
  1671. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1672. "6237 BW Threshold %lld%% (%lld): "
  1673. "%lld%% %s: Signal Alarm: cg:%d "
  1674. "Info:%u\n",
  1675. bwpcent, bw, pcent, s, cg,
  1676. phba->cmf_active_info);
  1677. } else if (afpin) {
  1678. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1679. "6238 BW Threshold %lld%% (%lld): "
  1680. "%lld%% %s: FPIN Alarm: cg:%d "
  1681. "Info:%u\n",
  1682. bwpcent, bw, pcent, s, cg,
  1683. phba->cmf_active_info);
  1684. } else if (sigcnt) {
  1685. wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
  1686. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1687. "6239 BW Threshold %lld%% (%lld): "
  1688. "%lld%% %s: Signal Warning: "
  1689. "Cnt %d Max %d: cg:%d Info:%u\n",
  1690. bwpcent, bw, pcent, s, sigcnt,
  1691. wsigmax, cg, phba->cmf_active_info);
  1692. } else if (fpincnt) {
  1693. wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
  1694. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1695. "6240 BW Threshold %lld%% (%lld): "
  1696. "%lld%% %s: FPIN Warning: "
  1697. "Cnt %d Max %d: cg:%d Info:%u\n",
  1698. bwpcent, bw, pcent, s, fpincnt,
  1699. wfpinmax, cg, phba->cmf_active_info);
  1700. } else {
  1701. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1702. "6241 BW Threshold %lld%% (%lld): "
  1703. "CMF %lld%% %s: cg:%d Info:%u\n",
  1704. bwpcent, bw, pcent, s, cg,
  1705. phba->cmf_active_info);
  1706. }
  1707. } else if (info) {
  1708. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1709. "6246 Info Threshold %u\n", info);
  1710. }
  1711. /* Save BW change to be picked up during next timer interrupt */
  1712. phba->cmf_last_sync_bw = bw;
  1713. out:
  1714. lpfc_sli_release_iocbq(phba, cmdiocb);
  1715. }
  1716. /**
  1717. * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
  1718. * @phba: Pointer to HBA context object.
  1719. * @ms: ms to set in WQE interval, 0 means use init op
  1720. * @total: Total rcv bytes for this interval
  1721. *
  1722. * This routine is called every CMF timer interrupt. Its purpose is
  1723. * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
  1724. * that may indicate we have congestion (FPINs or Signals). Upon
  1725. * completion, the firmware will indicate any BW restrictions the
  1726. * driver may need to take.
  1727. **/
  1728. int
  1729. lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
  1730. {
  1731. union lpfc_wqe128 *wqe;
  1732. struct lpfc_iocbq *sync_buf;
  1733. unsigned long iflags;
  1734. u32 ret_val;
  1735. u32 atot, wtot, max;
  1736. u8 warn_sync_period = 0;
  1737. /* First address any alarm / warning activity */
  1738. atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
  1739. wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
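/* atomic_xchg with zero both samples and clears the counters, so each
 * CMF_SYNC_WQE reports only the alarms / warnings seen since the
 * previous timer interval.
 */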
  1740. spin_lock_irqsave(&phba->hbalock, iflags);
  1741. /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
  1742. if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
  1743. phba->link_state < LPFC_LINK_UP) {
  1744. ret_val = 0;
  1745. goto out_unlock;
  1746. }
  1747. sync_buf = __lpfc_sli_get_iocbq(phba);
  1748. if (!sync_buf) {
  1749. lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
  1750. "6244 No available WQEs for CMF_SYNC_WQE\n");
  1751. ret_val = ENOMEM;
  1752. goto out_unlock;
  1753. }
  1754. wqe = &sync_buf->wqe;
  1755. /* WQEs are reused. Clear stale data and set key fields to zero */
  1756. memset(wqe, 0, sizeof(*wqe));
  1757. /* If this is the very first CMF_SYNC_WQE, issue an init operation */
  1758. if (!ms) {
  1759. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1760. "6441 CMF Init %d - CMF_SYNC_WQE\n",
  1761. phba->fc_eventTag);
  1762. bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
  1763. bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
  1764. goto initpath;
  1765. }
  1766. bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
  1767. bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
  1768. /* Check for alarms / warnings */
  1769. if (atot) {
  1770. if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1771. /* We hit a Signal alarm condition */
  1772. bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
  1773. } else {
  1774. /* We hit a FPIN alarm condition */
  1775. bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
  1776. }
  1777. } else if (wtot) {
  1778. if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
  1779. phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1780. /* We hit a Signal warning condition */
  1781. max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
  1782. lpfc_acqe_cgn_frequency;
  1783. bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
  1784. bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
  1785. warn_sync_period = lpfc_acqe_cgn_frequency;
  1786. } else {
  1787. /* We hit a FPIN warning condition */
  1788. bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
  1789. bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
  1790. if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
  1791. warn_sync_period =
  1792. LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
  1793. }
  1794. }
  1795. /* Update total read blocks during previous timer interval */
  1796. wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
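/* read_bytes is carried in LPFC_CMF_BLK_SIZE blocks to match the units
 * the firmware uses for the bandwidth value it returns in the
 * completion (see lpfc_cmf_sync_cmpl above).
 */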
  1797. initpath:
  1798. bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
  1799. wqe->cmf_sync.event_tag = phba->fc_eventTag;
  1800. bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
  1801. /* Setup reqtag to match the wqe completion. */
  1802. bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
  1803. bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
  1804. bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
  1805. bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
  1806. bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
  1807. bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
  1808. sync_buf->vport = phba->pport;
  1809. sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
  1810. sync_buf->cmd_dmabuf = NULL;
  1811. sync_buf->rsp_dmabuf = NULL;
  1812. sync_buf->bpl_dmabuf = NULL;
  1813. sync_buf->sli4_xritag = NO_XRI;
  1814. sync_buf->cmd_flag |= LPFC_IO_CMF;
  1815. ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
  1816. if (ret_val) {
  1817. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  1818. "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
  1819. ret_val);
  1820. __lpfc_sli_release_iocbq(phba, sync_buf);
  1821. }
  1822. out_unlock:
  1823. spin_unlock_irqrestore(&phba->hbalock, iflags);
  1824. return ret_val;
  1825. }
  1826. /**
  1827. * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
  1828. * @phba: Pointer to HBA context object.
  1829. * @pring: Pointer to driver SLI ring object.
  1830. *
  1831. * This function is called with hbalock held and the caller must post the
  1832. * iocb without releasing the lock. If the caller releases the lock,
1833. * the iocb slot returned by the function is not guaranteed to be available.
1834. * The function returns a pointer to the next available iocb slot if there
1835. * is an available slot in the ring, else it returns NULL.
  1836. * If the get index of the ring is ahead of the put index, the function
  1837. * will post an error attention event to the worker thread to take the
  1838. * HBA to offline state.
  1839. **/
  1840. static IOCB_t *
  1841. lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  1842. {
  1843. struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
  1844. uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
  1845. lockdep_assert_held(&phba->hbalock);
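/* next_cmdidx is only advanced once it has caught up with cmdidx,
 * i.e. when the slot handed out last time has been consumed, and it
 * wraps back to the start of the ring when it reaches the end.
 */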
  1846. if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
  1847. (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
  1848. pring->sli.sli3.next_cmdidx = 0;
  1849. if (unlikely(pring->sli.sli3.local_getidx ==
  1850. pring->sli.sli3.next_cmdidx)) {
  1851. pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
  1852. if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
  1853. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  1854. "0315 Ring %d issue: portCmdGet %d "
  1855. "is bigger than cmd ring %d\n",
  1856. pring->ringno,
  1857. pring->sli.sli3.local_getidx,
  1858. max_cmd_idx);
  1859. phba->link_state = LPFC_HBA_ERROR;
  1860. /*
  1861. * All error attention handlers are posted to
  1862. * worker thread
  1863. */
  1864. phba->work_ha |= HA_ERATT;
  1865. phba->work_hs = HS_FFER3;
  1866. lpfc_worker_wake_up(phba);
  1867. return NULL;
  1868. }
  1869. if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
  1870. return NULL;
  1871. }
  1872. return lpfc_cmd_iocb(phba, pring);
  1873. }
  1874. /**
  1875. * lpfc_sli_next_iotag - Get an iotag for the iocb
  1876. * @phba: Pointer to HBA context object.
  1877. * @iocbq: Pointer to driver iocb object.
  1878. *
  1879. * This function gets an iotag for the iocb. If there is no unused iotag and
1880. * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
  1881. * array and assigns a new iotag.
  1882. * The function returns the allocated iotag if successful, else returns zero.
  1883. * Zero is not a valid iotag.
  1884. * The caller is not required to hold any lock.
  1885. **/
  1886. uint16_t
  1887. lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
  1888. {
  1889. struct lpfc_iocbq **new_arr;
  1890. struct lpfc_iocbq **old_arr;
  1891. size_t new_len;
  1892. struct lpfc_sli *psli = &phba->sli;
  1893. uint16_t iotag;
  1894. spin_lock_irq(&phba->hbalock);
  1895. iotag = psli->last_iotag;
  1896. if(++iotag < psli->iocbq_lookup_len) {
  1897. psli->last_iotag = iotag;
  1898. psli->iocbq_lookup[iotag] = iocbq;
  1899. spin_unlock_irq(&phba->hbalock);
  1900. iocbq->iotag = iotag;
  1901. return iotag;
  1902. } else if (psli->iocbq_lookup_len < (0xffff
  1903. - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
  1904. new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
  1905. spin_unlock_irq(&phba->hbalock);
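/* kcalloc(GFP_KERNEL) may sleep, which is why hbalock was released
 * above; the lookup table length is re-checked under the lock below in
 * case it changed while the lock was dropped.
 */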
  1906. new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
  1907. GFP_KERNEL);
  1908. if (new_arr) {
  1909. spin_lock_irq(&phba->hbalock);
  1910. old_arr = psli->iocbq_lookup;
  1911. if (new_len <= psli->iocbq_lookup_len) {
1912. /* highly improbable case */
  1913. kfree(new_arr);
  1914. iotag = psli->last_iotag;
  1915. if(++iotag < psli->iocbq_lookup_len) {
  1916. psli->last_iotag = iotag;
  1917. psli->iocbq_lookup[iotag] = iocbq;
  1918. spin_unlock_irq(&phba->hbalock);
  1919. iocbq->iotag = iotag;
  1920. return iotag;
  1921. }
  1922. spin_unlock_irq(&phba->hbalock);
  1923. return 0;
  1924. }
  1925. if (psli->iocbq_lookup)
  1926. memcpy(new_arr, old_arr,
  1927. ((psli->last_iotag + 1) *
  1928. sizeof (struct lpfc_iocbq *)));
  1929. psli->iocbq_lookup = new_arr;
  1930. psli->iocbq_lookup_len = new_len;
  1931. psli->last_iotag = iotag;
  1932. psli->iocbq_lookup[iotag] = iocbq;
  1933. spin_unlock_irq(&phba->hbalock);
  1934. iocbq->iotag = iotag;
  1935. kfree(old_arr);
  1936. return iotag;
  1937. }
  1938. } else
  1939. spin_unlock_irq(&phba->hbalock);
  1940. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  1941. "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
  1942. psli->last_iotag);
  1943. return 0;
  1944. }
  1945. /**
  1946. * lpfc_sli_submit_iocb - Submit an iocb to the firmware
  1947. * @phba: Pointer to HBA context object.
  1948. * @pring: Pointer to driver SLI ring object.
  1949. * @iocb: Pointer to iocb slot in the ring.
  1950. * @nextiocb: Pointer to driver iocb object which need to be
  1951. * posted to firmware.
  1952. *
  1953. * This function is called to post a new iocb to the firmware. This
1954. * function copies the new iocb into the ring iocb slot and updates the
1955. * ring pointers. It adds the new iocb to the txcmplq if there is
1956. * a completion callback for this iocb, else the function will free the
  1957. * iocb object. The hbalock is asserted held in the code path calling
  1958. * this routine.
  1959. **/
  1960. static void
  1961. lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  1962. IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
  1963. {
  1964. /*
  1965. * Set up an iotag
  1966. */
  1967. nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
  1968. if (pring->ringno == LPFC_ELS_RING) {
  1969. lpfc_debugfs_slow_ring_trc(phba,
  1970. "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
  1971. *(((uint32_t *) &nextiocb->iocb) + 4),
  1972. *(((uint32_t *) &nextiocb->iocb) + 6),
  1973. *(((uint32_t *) &nextiocb->iocb) + 7));
  1974. }
  1975. /*
  1976. * Issue iocb command to adapter
  1977. */
  1978. lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
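/* The wmb() below orders the iocb copy ahead of the cmdPutInx update
 * at the end of this routine, so the HBA never sees the new put index
 * before the iocb contents are visible.
 */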
  1979. wmb();
  1980. pring->stats.iocb_cmd++;
  1981. /*
  1982. * If there is no completion routine to call, we can release the
  1983. * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
  1984. * that have no rsp ring completion, cmd_cmpl MUST be NULL.
  1985. */
  1986. if (nextiocb->cmd_cmpl)
  1987. lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
  1988. else
  1989. __lpfc_sli_release_iocbq(phba, nextiocb);
  1990. /*
  1991. * Let the HBA know what IOCB slot will be the next one the
  1992. * driver will put a command into.
  1993. */
  1994. pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
  1995. writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
  1996. }
  1997. /**
  1998. * lpfc_sli_update_full_ring - Update the chip attention register
  1999. * @phba: Pointer to HBA context object.
  2000. * @pring: Pointer to driver SLI ring object.
  2001. *
  2002. * The caller is not required to hold any lock for calling this function.
  2003. * This function updates the chip attention bits for the ring to inform firmware
2004. * that there is pending work to be done for this ring and requests an
  2005. * interrupt when there is space available in the ring. This function is
  2006. * called when the driver is unable to post more iocbs to the ring due
  2007. * to unavailability of space in the ring.
  2008. **/
  2009. static void
  2010. lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  2011. {
  2012. int ringno = pring->ringno;
  2013. pring->flag |= LPFC_CALL_RING_AVAILABLE;
  2014. wmb();
  2015. /*
  2016. * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
  2017. * The HBA will tell us when an IOCB entry is available.
  2018. */
  2019. writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
  2020. readl(phba->CAregaddr); /* flush */
  2021. pring->stats.iocb_cmd_full++;
  2022. }
  2023. /**
  2024. * lpfc_sli_update_ring - Update chip attention register
  2025. * @phba: Pointer to HBA context object.
  2026. * @pring: Pointer to driver SLI ring object.
  2027. *
  2028. * This function updates the chip attention register bit for the
  2029. * given ring to inform HBA that there is more work to be done
  2030. * in this ring. The caller is not required to hold any lock.
  2031. **/
  2032. static void
  2033. lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  2034. {
  2035. int ringno = pring->ringno;
  2036. /*
  2037. * Tell the HBA that there is work to do in this ring.
  2038. */
  2039. if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
  2040. wmb();
  2041. writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
  2042. readl(phba->CAregaddr); /* flush */
  2043. }
  2044. }
  2045. /**
  2046. * lpfc_sli_resume_iocb - Process iocbs in the txq
  2047. * @phba: Pointer to HBA context object.
  2048. * @pring: Pointer to driver SLI ring object.
  2049. *
  2050. * This function is called with hbalock held to post pending iocbs
2051. * in the txq to the firmware. This function is called when the driver
  2052. * detects space available in the ring.
  2053. **/
  2054. static void
  2055. lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  2056. {
  2057. IOCB_t *iocb;
  2058. struct lpfc_iocbq *nextiocb;
  2059. lockdep_assert_held(&phba->hbalock);
  2060. /*
  2061. * Check to see if:
  2062. * (a) there is anything on the txq to send
  2063. * (b) link is up
  2064. * (c) link attention events can be processed (fcp ring only)
  2065. * (d) IOCB processing is not blocked by the outstanding mbox command.
  2066. */
  2067. if (lpfc_is_link_up(phba) &&
  2068. (!list_empty(&pring->txq)) &&
  2069. (pring->ringno != LPFC_FCP_RING ||
  2070. phba->sli.sli_flag & LPFC_PROCESS_LA)) {
  2071. while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
  2072. (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
  2073. lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
  2074. if (iocb)
  2075. lpfc_sli_update_ring(phba, pring);
  2076. else
  2077. lpfc_sli_update_full_ring(phba, pring);
  2078. }
  2079. return;
  2080. }
  2081. /**
  2082. * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
  2083. * @phba: Pointer to HBA context object.
  2084. * @hbqno: HBQ number.
  2085. *
  2086. * This function is called with hbalock held to get the next
2087. * available slot for the given HBQ. If there is a free slot
2088. * available for the HBQ, it will return a pointer to the next available
2089. * HBQ entry, else it will return NULL.
  2090. **/
  2091. static struct lpfc_hbq_entry *
  2092. lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
  2093. {
  2094. struct hbq_s *hbqp = &phba->hbqs[hbqno];
  2095. lockdep_assert_held(&phba->hbalock);
  2096. if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
  2097. ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
  2098. hbqp->next_hbqPutIdx = 0;
  2099. if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
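/* The HBQ looks full based on the cached get index; re-read the
 * HBA-maintained (little-endian) get index from host memory before
 * concluding that no free slot exists.
 */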
  2100. uint32_t raw_index = phba->hbq_get[hbqno];
  2101. uint32_t getidx = le32_to_cpu(raw_index);
  2102. hbqp->local_hbqGetIdx = getidx;
  2103. if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
  2104. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  2105. "1802 HBQ %d: local_hbqGetIdx "
  2106. "%u is > than hbqp->entry_count %u\n",
  2107. hbqno, hbqp->local_hbqGetIdx,
  2108. hbqp->entry_count);
  2109. phba->link_state = LPFC_HBA_ERROR;
  2110. return NULL;
  2111. }
  2112. if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
  2113. return NULL;
  2114. }
  2115. return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
  2116. hbqp->hbqPutIdx;
  2117. }
  2118. /**
  2119. * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
  2120. * @phba: Pointer to HBA context object.
  2121. *
  2122. * This function is called with no lock held to free all the
  2123. * hbq buffers while uninitializing the SLI interface. It also
  2124. * frees the HBQ buffers returned by the firmware but not yet
  2125. * processed by the upper layers.
  2126. **/
  2127. void
  2128. lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
  2129. {
  2130. struct lpfc_dmabuf *dmabuf, *next_dmabuf;
  2131. struct hbq_dmabuf *hbq_buf;
  2132. unsigned long flags;
  2133. int i, hbq_count;
  2134. hbq_count = lpfc_sli_hbq_count();
  2135. /* Return all memory used by all HBQs */
  2136. spin_lock_irqsave(&phba->hbalock, flags);
  2137. for (i = 0; i < hbq_count; ++i) {
  2138. list_for_each_entry_safe(dmabuf, next_dmabuf,
  2139. &phba->hbqs[i].hbq_buffer_list, list) {
  2140. hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
  2141. list_del(&hbq_buf->dbuf.list);
  2142. (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
  2143. }
  2144. phba->hbqs[i].buffer_count = 0;
  2145. }
  2146. /* Mark the HBQs not in use */
  2147. phba->hbq_in_use = 0;
  2148. spin_unlock_irqrestore(&phba->hbalock, flags);
  2149. }
  2150. /**
  2151. * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
  2152. * @phba: Pointer to HBA context object.
  2153. * @hbqno: HBQ number.
  2154. * @hbq_buf: Pointer to HBQ buffer.
  2155. *
  2156. * This function is called with the hbalock held to post a
  2157. * hbq buffer to the firmware. If the function finds an empty
2158. * slot in the HBQ, it will post the buffer. The function will return
2159. * zero if it successfully posts the buffer, else it will return
2160. * an error.
  2161. **/
  2162. static int
  2163. lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
  2164. struct hbq_dmabuf *hbq_buf)
  2165. {
  2166. lockdep_assert_held(&phba->hbalock);
  2167. return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
  2168. }
  2169. /**
  2170. * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
  2171. * @phba: Pointer to HBA context object.
  2172. * @hbqno: HBQ number.
  2173. * @hbq_buf: Pointer to HBQ buffer.
  2174. *
  2175. * This function is called with the hbalock held to post a hbq buffer to the
  2176. * firmware. If the function finds an empty slot in the HBQ, it will post the
  2177. * buffer and place it on the hbq_buffer_list. The function will return zero if
2178. * it successfully posts the buffer, else it will return an error.
  2179. **/
  2180. static int
  2181. lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
  2182. struct hbq_dmabuf *hbq_buf)
  2183. {
  2184. struct lpfc_hbq_entry *hbqe;
  2185. dma_addr_t physaddr = hbq_buf->dbuf.phys;
  2186. lockdep_assert_held(&phba->hbalock);
  2187. /* Get next HBQ entry slot to use */
  2188. hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
  2189. if (hbqe) {
  2190. struct hbq_s *hbqp = &phba->hbqs[hbqno];
  2191. hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
  2192. hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
  2193. hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
  2194. hbqe->bde.tus.f.bdeFlags = 0;
  2195. hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
  2196. hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
  2197. /* Sync SLIM */
  2198. hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
  2199. writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
  2200. /* flush */
  2201. readl(phba->hbq_put + hbqno);
  2202. list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
  2203. return 0;
  2204. } else
  2205. return -ENOMEM;
  2206. }
  2207. /**
  2208. * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
  2209. * @phba: Pointer to HBA context object.
  2210. * @hbqno: HBQ number.
  2211. * @hbq_buf: Pointer to HBQ buffer.
  2212. *
  2213. * This function is called with the hbalock held to post an RQE to the SLI4
  2214. * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
  2215. * the hbq_buffer_list and return zero, otherwise it will return an error.
  2216. **/
  2217. static int
  2218. lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
  2219. struct hbq_dmabuf *hbq_buf)
  2220. {
  2221. int rc;
  2222. struct lpfc_rqe hrqe;
  2223. struct lpfc_rqe drqe;
  2224. struct lpfc_queue *hrq;
  2225. struct lpfc_queue *drq;
  2226. if (hbqno != LPFC_ELS_HBQ)
  2227. return 1;
  2228. hrq = phba->sli4_hba.hdr_rq;
  2229. drq = phba->sli4_hba.dat_rq;
  2230. lockdep_assert_held(&phba->hbalock);
  2231. hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
  2232. hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
  2233. drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
  2234. drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
  2235. rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
  2236. if (rc < 0)
  2237. return rc;
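/* On success lpfc_sli4_rq_put() returns the index at which the RQE was
 * posted; keep it in the low 16 bits of the tag and the HBQ number in
 * the upper 16 bits. lpfc_sli_hbqbuf_find() uses the upper bits to pick
 * the list and the full tag to match the buffer.
 */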
  2238. hbq_buf->tag = (rc | (hbqno << 16));
  2239. list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
  2240. return 0;
  2241. }
  2242. /* HBQ for ELS and CT traffic. */
  2243. static struct lpfc_hbq_init lpfc_els_hbq = {
  2244. .rn = 1,
  2245. .entry_count = 256,
  2246. .mask_count = 0,
  2247. .profile = 0,
  2248. .ring_mask = (1 << LPFC_ELS_RING),
  2249. .buffer_count = 0,
  2250. .init_count = 40,
  2251. .add_count = 40,
  2252. };
  2253. /* Array of HBQs */
  2254. struct lpfc_hbq_init *lpfc_hbq_defs[] = {
  2255. &lpfc_els_hbq,
  2256. };
  2257. /**
  2258. * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
  2259. * @phba: Pointer to HBA context object.
  2260. * @hbqno: HBQ number.
  2261. * @count: Number of HBQ buffers to be posted.
  2262. *
  2263. * This function is called with no lock held to post more hbq buffers to the
  2264. * given HBQ. The function returns the number of HBQ buffers successfully
  2265. * posted.
  2266. **/
  2267. static int
  2268. lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
  2269. {
  2270. uint32_t i, posted = 0;
  2271. unsigned long flags;
  2272. struct hbq_dmabuf *hbq_buffer;
  2273. LIST_HEAD(hbq_buf_list);
  2274. if (!phba->hbqs[hbqno].hbq_alloc_buffer)
  2275. return 0;
  2276. if ((phba->hbqs[hbqno].buffer_count + count) >
  2277. lpfc_hbq_defs[hbqno]->entry_count)
  2278. count = lpfc_hbq_defs[hbqno]->entry_count -
  2279. phba->hbqs[hbqno].buffer_count;
  2280. if (!count)
  2281. return 0;
  2282. /* Allocate HBQ entries */
  2283. for (i = 0; i < count; i++) {
  2284. hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
  2285. if (!hbq_buffer)
  2286. break;
  2287. list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
  2288. }
  2289. /* Check whether HBQ is still in use */
  2290. spin_lock_irqsave(&phba->hbalock, flags);
  2291. if (!phba->hbq_in_use)
  2292. goto err;
  2293. while (!list_empty(&hbq_buf_list)) {
  2294. list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
  2295. dbuf.list);
  2296. hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
  2297. (hbqno << 16));
  2298. if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
  2299. phba->hbqs[hbqno].buffer_count++;
  2300. posted++;
  2301. } else
  2302. (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
  2303. }
  2304. spin_unlock_irqrestore(&phba->hbalock, flags);
  2305. return posted;
  2306. err:
  2307. spin_unlock_irqrestore(&phba->hbalock, flags);
  2308. while (!list_empty(&hbq_buf_list)) {
  2309. list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
  2310. dbuf.list);
  2311. (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
  2312. }
  2313. return 0;
  2314. }
  2315. /**
  2316. * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
  2317. * @phba: Pointer to HBA context object.
  2318. * @qno: HBQ number.
  2319. *
  2320. * This function posts more buffers to the HBQ. This function
  2321. * is called with no lock held. The function returns the number of HBQ entries
2322. * successfully posted.
  2323. **/
  2324. int
  2325. lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
  2326. {
  2327. if (phba->sli_rev == LPFC_SLI_REV4)
  2328. return 0;
  2329. else
  2330. return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
  2331. lpfc_hbq_defs[qno]->add_count);
  2332. }
  2333. /**
  2334. * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
  2335. * @phba: Pointer to HBA context object.
  2336. * @qno: HBQ queue number.
  2337. *
  2338. * This function is called from SLI initialization code path with
  2339. * no lock held to post initial HBQ buffers to firmware. The
2340. * function returns the number of HBQ entries successfully posted.
  2341. **/
  2342. static int
  2343. lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
  2344. {
  2345. if (phba->sli_rev == LPFC_SLI_REV4)
  2346. return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
  2347. lpfc_hbq_defs[qno]->entry_count);
  2348. else
  2349. return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
  2350. lpfc_hbq_defs[qno]->init_count);
  2351. }
  2352. /*
  2353. * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
  2354. *
  2355. * This function removes the first hbq buffer on an hbq list and returns a
  2356. * pointer to that buffer. If it finds no buffers on the list it returns NULL.
  2357. **/
  2358. static struct hbq_dmabuf *
  2359. lpfc_sli_hbqbuf_get(struct list_head *rb_list)
  2360. {
  2361. struct lpfc_dmabuf *d_buf;
  2362. list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
  2363. if (!d_buf)
  2364. return NULL;
  2365. return container_of(d_buf, struct hbq_dmabuf, dbuf);
  2366. }
  2367. /**
  2368. * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
  2369. * @phba: Pointer to HBA context object.
2370. * @hrq: Pointer to the receive queue to remove the buffer from.
  2371. *
  2372. * This function removes the first RQ buffer on an RQ buffer list and returns a
  2373. * pointer to that buffer. If it finds no buffers on the list it returns NULL.
  2374. **/
  2375. static struct rqb_dmabuf *
  2376. lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
  2377. {
  2378. struct lpfc_dmabuf *h_buf;
  2379. struct lpfc_rqb *rqbp;
  2380. rqbp = hrq->rqbp;
  2381. list_remove_head(&rqbp->rqb_buffer_list, h_buf,
  2382. struct lpfc_dmabuf, list);
  2383. if (!h_buf)
  2384. return NULL;
  2385. rqbp->buffer_count--;
  2386. return container_of(h_buf, struct rqb_dmabuf, hbuf);
  2387. }
  2388. /**
  2389. * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
  2390. * @phba: Pointer to HBA context object.
  2391. * @tag: Tag of the hbq buffer.
  2392. *
  2393. * This function searches for the hbq buffer associated with the given tag in
  2394. * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
  2395. * otherwise it returns NULL.
  2396. **/
  2397. static struct hbq_dmabuf *
  2398. lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
  2399. {
  2400. struct lpfc_dmabuf *d_buf;
  2401. struct hbq_dmabuf *hbq_buf;
  2402. uint32_t hbqno;
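/* The HBQ number lives in the upper 16 bits of the tag; the lower
 * 16 bits identify the buffer within that HBQ.
 */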
  2403. hbqno = tag >> 16;
  2404. if (hbqno >= LPFC_MAX_HBQS)
  2405. return NULL;
  2406. spin_lock_irq(&phba->hbalock);
  2407. list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
  2408. hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  2409. if (hbq_buf->tag == tag) {
  2410. spin_unlock_irq(&phba->hbalock);
  2411. return hbq_buf;
  2412. }
  2413. }
  2414. spin_unlock_irq(&phba->hbalock);
  2415. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  2416. "1803 Bad hbq tag. Data: x%x x%x\n",
  2417. tag, phba->hbqs[tag >> 16].buffer_count);
  2418. return NULL;
  2419. }
  2420. /**
  2421. * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
  2422. * @phba: Pointer to HBA context object.
  2423. * @hbq_buffer: Pointer to HBQ buffer.
  2424. *
2425. * This function is called with the hbalock held. This function gives back
  2426. * the hbq buffer to firmware. If the HBQ does not have space to
  2427. * post the buffer, it will free the buffer.
  2428. **/
  2429. void
  2430. lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
  2431. {
  2432. uint32_t hbqno;
  2433. if (hbq_buffer) {
  2434. hbqno = hbq_buffer->tag >> 16;
  2435. if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
  2436. (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
  2437. }
  2438. }
  2439. /**
  2440. * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
  2441. * @mbxCommand: mailbox command code.
  2442. *
  2443. * This function is called by the mailbox event handler function to verify
  2444. * that the completed mailbox command is a legitimate mailbox command. If the
  2445. * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
  2446. * and the mailbox event handler will take the HBA offline.
  2447. **/
  2448. static int
  2449. lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
  2450. {
  2451. uint8_t ret;
  2452. switch (mbxCommand) {
  2453. case MBX_LOAD_SM:
  2454. case MBX_READ_NV:
  2455. case MBX_WRITE_NV:
  2456. case MBX_WRITE_VPARMS:
  2457. case MBX_RUN_BIU_DIAG:
  2458. case MBX_INIT_LINK:
  2459. case MBX_DOWN_LINK:
  2460. case MBX_CONFIG_LINK:
  2461. case MBX_CONFIG_RING:
  2462. case MBX_RESET_RING:
  2463. case MBX_READ_CONFIG:
  2464. case MBX_READ_RCONFIG:
  2465. case MBX_READ_SPARM:
  2466. case MBX_READ_STATUS:
  2467. case MBX_READ_RPI:
  2468. case MBX_READ_XRI:
  2469. case MBX_READ_REV:
  2470. case MBX_READ_LNK_STAT:
  2471. case MBX_REG_LOGIN:
  2472. case MBX_UNREG_LOGIN:
  2473. case MBX_CLEAR_LA:
  2474. case MBX_DUMP_MEMORY:
  2475. case MBX_DUMP_CONTEXT:
  2476. case MBX_RUN_DIAGS:
  2477. case MBX_RESTART:
  2478. case MBX_UPDATE_CFG:
  2479. case MBX_DOWN_LOAD:
  2480. case MBX_DEL_LD_ENTRY:
  2481. case MBX_RUN_PROGRAM:
  2482. case MBX_SET_MASK:
  2483. case MBX_SET_VARIABLE:
  2484. case MBX_UNREG_D_ID:
  2485. case MBX_KILL_BOARD:
  2486. case MBX_CONFIG_FARP:
  2487. case MBX_BEACON:
  2488. case MBX_LOAD_AREA:
  2489. case MBX_RUN_BIU_DIAG64:
  2490. case MBX_CONFIG_PORT:
  2491. case MBX_READ_SPARM64:
  2492. case MBX_READ_RPI64:
  2493. case MBX_REG_LOGIN64:
  2494. case MBX_READ_TOPOLOGY:
  2495. case MBX_WRITE_WWN:
  2496. case MBX_SET_DEBUG:
  2497. case MBX_LOAD_EXP_ROM:
  2498. case MBX_ASYNCEVT_ENABLE:
  2499. case MBX_REG_VPI:
  2500. case MBX_UNREG_VPI:
  2501. case MBX_HEARTBEAT:
  2502. case MBX_PORT_CAPABILITIES:
  2503. case MBX_PORT_IOV_CONTROL:
  2504. case MBX_SLI4_CONFIG:
  2505. case MBX_SLI4_REQ_FTRS:
  2506. case MBX_REG_FCFI:
  2507. case MBX_UNREG_FCFI:
  2508. case MBX_REG_VFI:
  2509. case MBX_UNREG_VFI:
  2510. case MBX_INIT_VPI:
  2511. case MBX_INIT_VFI:
  2512. case MBX_RESUME_RPI:
  2513. case MBX_READ_EVENT_LOG_STATUS:
  2514. case MBX_READ_EVENT_LOG:
  2515. case MBX_SECURITY_MGMT:
  2516. case MBX_AUTH_PORT:
  2517. case MBX_ACCESS_VDATA:
  2518. ret = mbxCommand;
  2519. break;
  2520. default:
  2521. ret = MBX_SHUTDOWN;
  2522. break;
  2523. }
  2524. return ret;
  2525. }
  2526. /**
  2527. * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
  2528. * @phba: Pointer to HBA context object.
  2529. * @pmboxq: Pointer to mailbox command.
  2530. *
  2531. * This is completion handler function for mailbox commands issued from
  2532. * lpfc_sli_issue_mbox_wait function. This function is called by the
  2533. * mailbox event handler function with no lock held. This function
2534. * will wake up the thread waiting on the completion pointed to by the
2535. * mbox_wait context of the mailbox.
  2536. **/
  2537. void
  2538. lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
  2539. {
  2540. unsigned long drvr_flag;
  2541. struct completion *pmbox_done;
  2542. /*
2543. * If pmbox_done is NULL, the driver thread gave up waiting and
  2544. * continued running.
  2545. */
  2546. pmboxq->mbox_flag |= LPFC_MBX_WAKE;
  2547. spin_lock_irqsave(&phba->hbalock, drvr_flag);
  2548. pmbox_done = pmboxq->ctx_u.mbox_wait;
  2549. if (pmbox_done)
  2550. complete(pmbox_done);
  2551. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  2552. return;
  2553. }
  2554. static void
  2555. __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  2556. {
  2557. unsigned long iflags;
  2558. if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
  2559. lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
  2560. spin_lock_irqsave(&ndlp->lock, iflags);
  2561. ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
  2562. ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
  2563. spin_unlock_irqrestore(&ndlp->lock, iflags);
  2564. }
  2565. ndlp->nlp_flag &= ~NLP_UNREG_INP;
  2566. }
  2567. void
  2568. lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  2569. {
  2570. __lpfc_sli_rpi_release(vport, ndlp);
  2571. }
  2572. /**
  2573. * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
  2574. * @phba: Pointer to HBA context object.
  2575. * @pmb: Pointer to mailbox object.
  2576. *
  2577. * This function is the default mailbox completion handler. It
  2578. * frees the memory resources associated with the completed mailbox
  2579. * command. If the completed command is a REG_LOGIN mailbox command,
  2580. * this function will issue a UREG_LOGIN to re-claim the RPI.
  2581. **/
  2582. void
  2583. lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
  2584. {
  2585. struct lpfc_vport *vport = pmb->vport;
  2586. struct lpfc_dmabuf *mp;
  2587. struct lpfc_nodelist *ndlp;
  2588. struct Scsi_Host *shost;
  2589. uint16_t rpi, vpi;
  2590. int rc;
  2591. /*
2592. * If a REG_LOGIN succeeded after the node was destroyed or the node
2593. * is in re-discovery, the driver needs to clean up the RPI.
  2594. */
  2595. if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
  2596. pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
  2597. !pmb->u.mb.mbxStatus) {
  2598. mp = pmb->ctx_buf;
  2599. if (mp) {
  2600. pmb->ctx_buf = NULL;
  2601. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  2602. kfree(mp);
  2603. }
  2604. rpi = pmb->u.mb.un.varWords[0];
  2605. vpi = pmb->u.mb.un.varRegLogin.vpi;
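/* SLI4 firmware reports an absolute VPI; convert it to the
 * driver-relative value before building the UNREG_LOGIN below.
 */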
  2606. if (phba->sli_rev == LPFC_SLI_REV4)
  2607. vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
  2608. lpfc_unreg_login(phba, vpi, rpi, pmb);
  2609. pmb->vport = vport;
  2610. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  2611. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  2612. if (rc != MBX_NOT_FINISHED)
  2613. return;
  2614. }
  2615. if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
  2616. !test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
  2617. !pmb->u.mb.mbxStatus) {
  2618. shost = lpfc_shost_from_vport(vport);
  2619. spin_lock_irq(shost->host_lock);
  2620. vport->vpi_state |= LPFC_VPI_REGISTERED;
  2621. spin_unlock_irq(shost->host_lock);
  2622. clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
  2623. }
  2624. if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
  2625. ndlp = pmb->ctx_ndlp;
  2626. lpfc_nlp_put(ndlp);
  2627. }
  2628. if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
  2629. ndlp = pmb->ctx_ndlp;
  2630. /* Check to see if there are any deferred events to process */
  2631. if (ndlp) {
  2632. lpfc_printf_vlog(
  2633. vport,
  2634. KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
  2635. "1438 UNREG cmpl deferred mbox x%x "
  2636. "on NPort x%x Data: x%x x%x x%px x%lx x%x\n",
  2637. ndlp->nlp_rpi, ndlp->nlp_DID,
  2638. ndlp->nlp_flag, ndlp->nlp_defer_did,
  2639. ndlp, vport->load_flag, kref_read(&ndlp->kref));
  2640. if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
  2641. (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
  2642. ndlp->nlp_flag &= ~NLP_UNREG_INP;
  2643. ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
  2644. lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
  2645. } else {
  2646. __lpfc_sli_rpi_release(vport, ndlp);
  2647. }
  2648. /* The unreg_login mailbox is complete and had a
  2649. * reference that has to be released. The PLOGI
  2650. * got its own ref.
  2651. */
  2652. lpfc_nlp_put(ndlp);
  2653. pmb->ctx_ndlp = NULL;
  2654. }
  2655. }
  2656. /* This nlp_put pairs with lpfc_sli4_resume_rpi */
  2657. if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
  2658. ndlp = pmb->ctx_ndlp;
  2659. lpfc_nlp_put(ndlp);
  2660. }
  2661. /* Check security permission status on INIT_LINK mailbox command */
  2662. if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
  2663. (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
  2664. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  2665. "2860 SLI authentication is required "
  2666. "for INIT_LINK but has not done yet\n");
  2667. if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
  2668. lpfc_sli4_mbox_cmd_free(phba, pmb);
  2669. else
  2670. lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
  2671. }
  2672. /**
  2673. * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
  2674. * @phba: Pointer to HBA context object.
  2675. * @pmb: Pointer to mailbox object.
  2676. *
  2677. * This function is the unreg rpi mailbox completion handler. It
  2678. * frees the memory resources associated with the completed mailbox
2679. * command. An additional reference was taken on the ndlp to prevent
2680. * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2681. * the unreg mailbox command completes; this routine puts that
2682. * reference back.
  2683. *
  2684. **/
  2685. void
  2686. lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
  2687. {
  2688. struct lpfc_vport *vport = pmb->vport;
  2689. struct lpfc_nodelist *ndlp;
  2690. ndlp = pmb->ctx_ndlp;
  2691. if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
  2692. if (phba->sli_rev == LPFC_SLI_REV4 &&
  2693. (bf_get(lpfc_sli_intf_if_type,
  2694. &phba->sli4_hba.sli_intf) >=
  2695. LPFC_SLI_INTF_IF_TYPE_2)) {
  2696. if (ndlp) {
  2697. lpfc_printf_vlog(
  2698. vport, KERN_INFO,
  2699. LOG_MBOX | LOG_SLI | LOG_NODE,
  2700. "0010 UNREG_LOGIN vpi:x%x "
  2701. "rpi:%x DID:%x defer x%x flg x%x "
  2702. "x%px\n",
  2703. vport->vpi, ndlp->nlp_rpi,
  2704. ndlp->nlp_DID, ndlp->nlp_defer_did,
  2705. ndlp->nlp_flag,
  2706. ndlp);
  2707. ndlp->nlp_flag &= ~NLP_LOGO_ACC;
  2708. /* Check to see if there are any deferred
  2709. * events to process
  2710. */
  2711. if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
  2712. (ndlp->nlp_defer_did !=
  2713. NLP_EVT_NOTHING_PENDING)) {
  2714. lpfc_printf_vlog(
  2715. vport, KERN_INFO,
  2716. LOG_MBOX | LOG_SLI | LOG_NODE,
  2717. "4111 UNREG cmpl deferred "
  2718. "clr x%x on "
  2719. "NPort x%x Data: x%x x%px\n",
  2720. ndlp->nlp_rpi, ndlp->nlp_DID,
  2721. ndlp->nlp_defer_did, ndlp);
  2722. ndlp->nlp_flag &= ~NLP_UNREG_INP;
  2723. ndlp->nlp_defer_did =
  2724. NLP_EVT_NOTHING_PENDING;
  2725. lpfc_issue_els_plogi(
  2726. vport, ndlp->nlp_DID, 0);
  2727. } else {
  2728. __lpfc_sli_rpi_release(vport, ndlp);
  2729. }
  2730. lpfc_nlp_put(ndlp);
  2731. }
  2732. }
  2733. }
  2734. mempool_free(pmb, phba->mbox_mem_pool);
  2735. }
  2736. /**
  2737. * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
  2738. * @phba: Pointer to HBA context object.
  2739. *
  2740. * This function is called with no lock held. This function processes all
  2741. * the completed mailbox commands and gives it to upper layers. The interrupt
  2742. * service routine processes mailbox completion interrupt and adds completed
  2743. * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2744. * The worker thread calls lpfc_sli_handle_mb_event, which will return the
  2745. * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
  2746. * function returns the mailbox commands to the upper layer by calling the
  2747. * completion handler function of each mailbox.
  2748. **/
  2749. int
  2750. lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
  2751. {
  2752. MAILBOX_t *pmbox;
  2753. LPFC_MBOXQ_t *pmb;
  2754. int rc;
  2755. LIST_HEAD(cmplq);
  2756. phba->sli.slistat.mbox_event++;
2757. /* Get all completed mailbox buffers into the cmplq */
  2758. spin_lock_irq(&phba->hbalock);
  2759. list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
  2760. spin_unlock_irq(&phba->hbalock);
  2761. /* Get a Mailbox buffer to setup mailbox commands for callback */
  2762. do {
  2763. list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
  2764. if (pmb == NULL)
  2765. break;
  2766. pmbox = &pmb->u.mb;
  2767. if (pmbox->mbxCommand != MBX_HEARTBEAT) {
  2768. if (pmb->vport) {
  2769. lpfc_debugfs_disc_trc(pmb->vport,
  2770. LPFC_DISC_TRC_MBOX_VPORT,
  2771. "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
  2772. (uint32_t)pmbox->mbxCommand,
  2773. pmbox->un.varWords[0],
  2774. pmbox->un.varWords[1]);
  2775. }
  2776. else {
  2777. lpfc_debugfs_disc_trc(phba->pport,
  2778. LPFC_DISC_TRC_MBOX,
  2779. "MBOX cmpl: cmd:x%x mb:x%x x%x",
  2780. (uint32_t)pmbox->mbxCommand,
  2781. pmbox->un.varWords[0],
  2782. pmbox->un.varWords[1]);
  2783. }
  2784. }
  2785. /*
2786. * It is a fatal error if an unknown mailbox command completes.
  2787. */
  2788. if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
  2789. MBX_SHUTDOWN) {
  2790. /* Unknown mailbox command compl */
  2791. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  2792. "(%d):0323 Unknown Mailbox command "
  2793. "x%x (x%x/x%x) Cmpl\n",
  2794. pmb->vport ? pmb->vport->vpi :
  2795. LPFC_VPORT_UNKNOWN,
  2796. pmbox->mbxCommand,
  2797. lpfc_sli_config_mbox_subsys_get(phba,
  2798. pmb),
  2799. lpfc_sli_config_mbox_opcode_get(phba,
  2800. pmb));
  2801. phba->link_state = LPFC_HBA_ERROR;
  2802. phba->work_hs = HS_FFER3;
  2803. lpfc_handle_eratt(phba);
  2804. continue;
  2805. }
  2806. if (pmbox->mbxStatus) {
  2807. phba->sli.slistat.mbox_stat_err++;
  2808. if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
  2809. /* Mbox cmd cmpl error - RETRYing */
  2810. lpfc_printf_log(phba, KERN_INFO,
  2811. LOG_MBOX | LOG_SLI,
  2812. "(%d):0305 Mbox cmd cmpl "
  2813. "error - RETRYing Data: x%x "
  2814. "(x%x/x%x) x%x x%x x%x\n",
  2815. pmb->vport ? pmb->vport->vpi :
  2816. LPFC_VPORT_UNKNOWN,
  2817. pmbox->mbxCommand,
  2818. lpfc_sli_config_mbox_subsys_get(phba,
  2819. pmb),
  2820. lpfc_sli_config_mbox_opcode_get(phba,
  2821. pmb),
  2822. pmbox->mbxStatus,
  2823. pmbox->un.varWords[0],
  2824. pmb->vport ? pmb->vport->port_state :
  2825. LPFC_VPORT_UNKNOWN);
  2826. pmbox->mbxStatus = 0;
  2827. pmbox->mbxOwner = OWN_HOST;
  2828. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  2829. if (rc != MBX_NOT_FINISHED)
  2830. continue;
  2831. }
  2832. }
  2833. /* Mailbox cmd <cmd> Cmpl <cmpl> */
  2834. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  2835. "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
  2836. "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
  2837. "x%x x%x x%x\n",
  2838. pmb->vport ? pmb->vport->vpi : 0,
  2839. pmbox->mbxCommand,
  2840. lpfc_sli_config_mbox_subsys_get(phba, pmb),
  2841. lpfc_sli_config_mbox_opcode_get(phba, pmb),
  2842. pmb->mbox_cmpl,
  2843. *((uint32_t *) pmbox),
  2844. pmbox->un.varWords[0],
  2845. pmbox->un.varWords[1],
  2846. pmbox->un.varWords[2],
  2847. pmbox->un.varWords[3],
  2848. pmbox->un.varWords[4],
  2849. pmbox->un.varWords[5],
  2850. pmbox->un.varWords[6],
  2851. pmbox->un.varWords[7],
  2852. pmbox->un.varWords[8],
  2853. pmbox->un.varWords[9],
  2854. pmbox->un.varWords[10]);
  2855. if (pmb->mbox_cmpl)
2856. pmb->mbox_cmpl(phba, pmb);
  2857. } while (1);
  2858. return 0;
  2859. }
  2860. /**
  2861. * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
  2862. * @phba: Pointer to HBA context object.
  2863. * @pring: Pointer to driver SLI ring object.
  2864. * @tag: buffer tag.
  2865. *
2866. * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2867. * is set in the tag, the buffer was posted for a particular exchange and
2868. * the function will return the buffer without replacing it.
  2869. * If the buffer is for unsolicited ELS or CT traffic, this function
  2870. * returns the buffer and also posts another buffer to the firmware.
  2871. **/
  2872. static struct lpfc_dmabuf *
  2873. lpfc_sli_get_buff(struct lpfc_hba *phba,
  2874. struct lpfc_sli_ring *pring,
  2875. uint32_t tag)
  2876. {
  2877. struct hbq_dmabuf *hbq_entry;
  2878. if (tag & QUE_BUFTAG_BIT)
  2879. return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
  2880. hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
  2881. if (!hbq_entry)
  2882. return NULL;
  2883. return &hbq_entry->dbuf;
  2884. }
  2885. /**
  2886. * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
  2887. * containing a NVME LS request.
  2888. * @phba: pointer to lpfc hba data structure.
  2889. * @piocb: pointer to the iocbq struct representing the sequence starting
  2890. * frame.
  2891. *
  2892. * This routine initially validates the NVME LS, validates there is a login
  2893. * with the port that sent the LS, and then calls the appropriate nvme host
  2894. * or target LS request handler.
  2895. **/
  2896. static void
  2897. lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
  2898. {
  2899. struct lpfc_nodelist *ndlp;
  2900. struct lpfc_dmabuf *d_buf;
  2901. struct hbq_dmabuf *nvmebuf;
  2902. struct fc_frame_header *fc_hdr;
  2903. struct lpfc_async_xchg_ctx *axchg = NULL;
  2904. char *failwhy = NULL;
  2905. uint32_t oxid, sid, did, fctl, size;
  2906. int ret = 1;
  2907. d_buf = piocb->cmd_dmabuf;
  2908. nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  2909. fc_hdr = nvmebuf->hbuf.virt;
  2910. oxid = be16_to_cpu(fc_hdr->fh_ox_id);
  2911. sid = sli4_sid_from_fc_hdr(fc_hdr);
  2912. did = sli4_did_from_fc_hdr(fc_hdr);
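/* Reassemble the 24-bit F_CTL field from its three bytes in the FC
 * header; the first/end-sequence and sequence-initiative bits are
 * validated further down.
 */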
  2913. fctl = (fc_hdr->fh_f_ctl[0] << 16 |
  2914. fc_hdr->fh_f_ctl[1] << 8 |
  2915. fc_hdr->fh_f_ctl[2]);
  2916. size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
  2917. lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
  2918. oxid, size, sid);
  2919. if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
  2920. failwhy = "Driver Unloading";
  2921. } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
  2922. failwhy = "NVME FC4 Disabled";
  2923. } else if (!phba->nvmet_support && !phba->pport->localport) {
  2924. failwhy = "No Localport";
  2925. } else if (phba->nvmet_support && !phba->targetport) {
  2926. failwhy = "No Targetport";
  2927. } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
  2928. failwhy = "Bad NVME LS R_CTL";
  2929. } else if (unlikely((fctl & 0x00FF0000) !=
  2930. (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
  2931. failwhy = "Bad NVME LS F_CTL";
  2932. } else {
  2933. axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
  2934. if (!axchg)
  2935. failwhy = "No CTX memory";
  2936. }
  2937. if (unlikely(failwhy)) {
  2938. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  2939. "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
  2940. sid, oxid, failwhy);
  2941. goto out_fail;
  2942. }
  2943. /* validate the source of the LS is logged in */
  2944. ndlp = lpfc_findnode_did(phba->pport, sid);
  2945. if (!ndlp ||
  2946. ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
  2947. (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
  2948. lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
  2949. "6216 NVME Unsol rcv: No ndlp: "
  2950. "NPort_ID x%x oxid x%x\n",
  2951. sid, oxid);
  2952. goto out_fail;
  2953. }
  2954. axchg->phba = phba;
  2955. axchg->ndlp = ndlp;
  2956. axchg->size = size;
  2957. axchg->oxid = oxid;
  2958. axchg->sid = sid;
  2959. axchg->wqeq = NULL;
  2960. axchg->state = LPFC_NVME_STE_LS_RCV;
  2961. axchg->entry_cnt = 1;
  2962. axchg->rqb_buffer = (void *)nvmebuf;
  2963. axchg->hdwq = &phba->sli4_hba.hdwq[0];
  2964. axchg->payload = nvmebuf->dbuf.virt;
  2965. INIT_LIST_HEAD(&axchg->list);
  2966. if (phba->nvmet_support) {
  2967. ret = lpfc_nvmet_handle_lsreq(phba, axchg);
  2968. spin_lock_irq(&ndlp->lock);
  2969. if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
  2970. ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
  2971. spin_unlock_irq(&ndlp->lock);
  2972. /* This reference is a single occurrence to hold the
  2973. * node valid until the nvmet transport calls
  2974. * host_release.
  2975. */
  2976. if (!lpfc_nlp_get(ndlp))
  2977. goto out_fail;
  2978. lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
  2979. "6206 NVMET unsol ls_req ndlp x%px "
  2980. "DID x%x xflags x%x refcnt %d\n",
  2981. ndlp, ndlp->nlp_DID,
  2982. ndlp->fc4_xpt_flags,
  2983. kref_read(&ndlp->kref));
  2984. } else {
  2985. spin_unlock_irq(&ndlp->lock);
  2986. }
  2987. } else {
  2988. ret = lpfc_nvme_handle_lsreq(phba, axchg);
  2989. }
  2990. /* if zero, LS was successfully handled. If non-zero, LS not handled */
  2991. if (!ret)
  2992. return;
  2993. out_fail:
  2994. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  2995. "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
  2996. "NVMe%s handler failed %d\n",
  2997. did, sid, oxid,
  2998. (phba->nvmet_support) ? "T" : "I", ret);
  2999. /* recycle receive buffer */
  3000. lpfc_in_buf_free(phba, &nvmebuf->dbuf);
  3001. /* If start of new exchange, abort it */
  3002. if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
  3003. ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
  3004. if (ret)
  3005. kfree(axchg);
  3006. }
  3007. /**
  3008. * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
  3009. * @phba: Pointer to HBA context object.
  3010. * @pring: Pointer to driver SLI ring object.
  3011. * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
  3012. * @fch_r_ctl: the r_ctl for the first frame of the sequence.
  3013. * @fch_type: the type for the first frame of the sequence.
  3014. *
  3015. * This function is called with no lock held. This function uses the r_ctl and
  3016. * type of the received sequence to find the correct callback function to call
  3017. * to process the sequence.
  3018. **/
  3019. static int
  3020. lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  3021. struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
  3022. uint32_t fch_type)
  3023. {
  3024. int i;
  3025. switch (fch_type) {
  3026. case FC_TYPE_NVME:
  3027. lpfc_nvme_unsol_ls_handler(phba, saveq);
  3028. return 1;
  3029. default:
  3030. break;
  3031. }
  3032. /* unSolicited Responses */
  3033. if (pring->prt[0].profile) {
  3034. if (pring->prt[0].lpfc_sli_rcv_unsol_event)
  3035. (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
  3036. saveq);
  3037. return 1;
  3038. }
  3039. /* We must search, based on rctl / type
  3040. for the right routine */
  3041. for (i = 0; i < pring->num_mask; i++) {
  3042. if ((pring->prt[i].rctl == fch_r_ctl) &&
  3043. (pring->prt[i].type == fch_type)) {
  3044. if (pring->prt[i].lpfc_sli_rcv_unsol_event)
  3045. (pring->prt[i].lpfc_sli_rcv_unsol_event)
  3046. (phba, pring, saveq);
  3047. return 1;
  3048. }
  3049. }
  3050. return 0;
  3051. }
  3052. static void
  3053. lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
  3054. struct lpfc_iocbq *saveq)
  3055. {
  3056. IOCB_t *irsp;
  3057. union lpfc_wqe128 *wqe;
  3058. u16 i = 0;
  3059. irsp = &saveq->iocb;
  3060. wqe = &saveq->wqe;
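/* Note: this mirrors the SLI3 IOCB fields into the WQE / WCQE layout
 * so later unsolicited handling that reads wcqe_cmpl and the
 * xmit_els_rsp fields works for SLI3 as well.
 */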
  3061. /* Fill wcqe with the IOCB status fields */
  3062. bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
  3063. saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
  3064. saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
  3065. saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
  3066. /* Source ID */
  3067. bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
  3068. /* rx-id of the response frame */
  3069. bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
  3070. /* ox-id of the frame */
  3071. bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
  3072. irsp->unsli3.rcvsli3.ox_id);
  3073. /* DID */
  3074. bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
  3075. irsp->un.rcvels.remoteID);
  3076. /* unsol data len */
  3077. for (i = 0; i < irsp->ulpBdeCount; i++) {
  3078. struct lpfc_hbq_entry *hbqe = NULL;
  3079. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  3080. if (i == 0) {
  3081. hbqe = (struct lpfc_hbq_entry *)
  3082. &irsp->un.ulpWord[0];
  3083. saveq->wqe.gen_req.bde.tus.f.bdeSize =
  3084. hbqe->bde.tus.f.bdeSize;
  3085. } else if (i == 1) {
  3086. hbqe = (struct lpfc_hbq_entry *)
  3087. &irsp->unsli3.sli3Words[4];
  3088. saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
  3089. }
  3090. }
  3091. }
  3092. }
  3093. /**
  3094. * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
  3095. * @phba: Pointer to HBA context object.
  3096. * @pring: Pointer to driver SLI ring object.
  3097. * @saveq: Pointer to the unsolicited iocb.
  3098. *
  3099. * This function is called with no lock held by the ring event handler
  3100. * when there is an unsolicited iocb posted to the response ring by the
  3101. * firmware. This function gets the buffer associated with the iocbs
  3102. * and calls the event handler for the ring. This function handles both
  3103. * qring buffers and hbq buffers.
3104. * When the function returns 1, the caller can free the iocb object; otherwise
  3105. * upper layer functions will free the iocb objects.
  3106. **/
  3107. static int
  3108. lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  3109. struct lpfc_iocbq *saveq)
  3110. {
  3111. IOCB_t * irsp;
  3112. WORD5 * w5p;
  3113. dma_addr_t paddr;
  3114. uint32_t Rctl, Type;
  3115. struct lpfc_iocbq *iocbq;
  3116. struct lpfc_dmabuf *dmzbuf;
  3117. irsp = &saveq->iocb;
  3118. saveq->vport = phba->pport;
  3119. if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
  3120. if (pring->lpfc_sli_rcv_async_status)
  3121. pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
  3122. else
  3123. lpfc_printf_log(phba,
  3124. KERN_WARNING,
  3125. LOG_SLI,
  3126. "0316 Ring %d handler: unexpected "
  3127. "ASYNC_STATUS iocb received evt_code "
  3128. "0x%x\n",
  3129. pring->ringno,
  3130. irsp->un.asyncstat.evt_code);
  3131. return 1;
  3132. }
  3133. if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
  3134. (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
  3135. if (irsp->ulpBdeCount > 0) {
  3136. dmzbuf = lpfc_sli_get_buff(phba, pring,
  3137. irsp->un.ulpWord[3]);
  3138. lpfc_in_buf_free(phba, dmzbuf);
  3139. }
  3140. if (irsp->ulpBdeCount > 1) {
  3141. dmzbuf = lpfc_sli_get_buff(phba, pring,
  3142. irsp->unsli3.sli3Words[3]);
  3143. lpfc_in_buf_free(phba, dmzbuf);
  3144. }
  3145. if (irsp->ulpBdeCount > 2) {
  3146. dmzbuf = lpfc_sli_get_buff(phba, pring,
  3147. irsp->unsli3.sli3Words[7]);
  3148. lpfc_in_buf_free(phba, dmzbuf);
  3149. }
  3150. return 1;
  3151. }
  3152. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  3153. if (irsp->ulpBdeCount != 0) {
  3154. saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
  3155. irsp->un.ulpWord[3]);
  3156. if (!saveq->cmd_dmabuf)
  3157. lpfc_printf_log(phba,
  3158. KERN_ERR,
  3159. LOG_SLI,
  3160. "0341 Ring %d Cannot find buffer for "
  3161. "an unsolicited iocb. tag 0x%x\n",
  3162. pring->ringno,
  3163. irsp->un.ulpWord[3]);
  3164. }
  3165. if (irsp->ulpBdeCount == 2) {
  3166. saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
  3167. irsp->unsli3.sli3Words[7]);
  3168. if (!saveq->bpl_dmabuf)
  3169. lpfc_printf_log(phba,
  3170. KERN_ERR,
  3171. LOG_SLI,
  3172. "0342 Ring %d Cannot find buffer for an"
  3173. " unsolicited iocb. tag 0x%x\n",
  3174. pring->ringno,
  3175. irsp->unsli3.sli3Words[7]);
  3176. }
  3177. list_for_each_entry(iocbq, &saveq->list, list) {
  3178. irsp = &iocbq->iocb;
  3179. if (irsp->ulpBdeCount != 0) {
  3180. iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
  3181. pring,
  3182. irsp->un.ulpWord[3]);
  3183. if (!iocbq->cmd_dmabuf)
  3184. lpfc_printf_log(phba,
  3185. KERN_ERR,
  3186. LOG_SLI,
  3187. "0343 Ring %d Cannot find "
  3188. "buffer for an unsolicited iocb"
  3189. ". tag 0x%x\n", pring->ringno,
  3190. irsp->un.ulpWord[3]);
  3191. }
  3192. if (irsp->ulpBdeCount == 2) {
  3193. iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
  3194. pring,
  3195. irsp->unsli3.sli3Words[7]);
  3196. if (!iocbq->bpl_dmabuf)
  3197. lpfc_printf_log(phba,
  3198. KERN_ERR,
  3199. LOG_SLI,
  3200. "0344 Ring %d Cannot find "
  3201. "buffer for an unsolicited "
  3202. "iocb. tag 0x%x\n",
  3203. pring->ringno,
  3204. irsp->unsli3.sli3Words[7]);
  3205. }
  3206. }
  3207. } else {
  3208. paddr = getPaddr(irsp->un.cont64[0].addrHigh,
  3209. irsp->un.cont64[0].addrLow);
  3210. saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
  3211. paddr);
  3212. if (irsp->ulpBdeCount == 2) {
  3213. paddr = getPaddr(irsp->un.cont64[1].addrHigh,
  3214. irsp->un.cont64[1].addrLow);
  3215. saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
  3216. pring,
  3217. paddr);
  3218. }
  3219. }
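/*
 * An unsolicited sequence can span multiple IOCBs. Intermediate pieces
 * are parked on the ring's iocb_continue_saveq, keyed by the ox_id of
 * the exchange, and only the final (non IOSTAT_INTERMED_RSP) IOCB pulls
 * the accumulated list off the save queue for processing.
 */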
  3220. if (irsp->ulpBdeCount != 0 &&
  3221. (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
  3222. irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
  3223. int found = 0;
  3224. /* search continue save q for same XRI */
  3225. list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
  3226. if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
  3227. saveq->iocb.unsli3.rcvsli3.ox_id) {
  3228. list_add_tail(&saveq->list, &iocbq->list);
  3229. found = 1;
  3230. break;
  3231. }
  3232. }
  3233. if (!found)
  3234. list_add_tail(&saveq->clist,
  3235. &pring->iocb_continue_saveq);
  3236. if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
  3237. list_del_init(&iocbq->clist);
  3238. saveq = iocbq;
  3239. irsp = &saveq->iocb;
  3240. } else {
  3241. return 0;
  3242. }
  3243. }
  3244. if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
  3245. (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
  3246. (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
  3247. Rctl = FC_RCTL_ELS_REQ;
  3248. Type = FC_TYPE_ELS;
  3249. } else {
  3250. w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
  3251. Rctl = w5p->hcsw.Rctl;
  3252. Type = w5p->hcsw.Type;
  3253. /* Firmware Workaround */
  3254. if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
  3255. (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
  3256. irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
  3257. Rctl = FC_RCTL_ELS_REQ;
  3258. Type = FC_TYPE_ELS;
  3259. w5p->hcsw.Rctl = Rctl;
  3260. w5p->hcsw.Type = Type;
  3261. }
  3262. }
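/*
 * For NPIV capable ports the receive IOCB carries the vpi of the
 * destination vport; use it to route the frame, falling back to the
 * physical port when the vpi is unresolved (0xffff).
 */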
  3263. if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
  3264. (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
  3265. irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
  3266. if (irsp->unsli3.rcvsli3.vpi == 0xffff)
  3267. saveq->vport = phba->pport;
  3268. else
  3269. saveq->vport = lpfc_find_vport_by_vpid(phba,
  3270. irsp->unsli3.rcvsli3.vpi);
  3271. }
  3272. /* Prepare WQE with Unsol frame */
  3273. lpfc_sli_prep_unsol_wqe(phba, saveq);
  3274. if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
  3275. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  3276. "0313 Ring %d handler: unexpected Rctl x%x "
  3277. "Type x%x received\n",
  3278. pring->ringno, Rctl, Type);
  3279. return 1;
  3280. }
  3281. /**
  3282. * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
  3283. * @phba: Pointer to HBA context object.
  3284. * @pring: Pointer to driver SLI ring object.
  3285. * @prspiocb: Pointer to response iocb object.
  3286. *
  3287. * This function looks up the iocb_lookup table to get the command iocb
  3288. * corresponding to the given response iocb using the iotag of the
  3289. * response iocb. The driver calls this function with the hbalock held
  3290. * for SLI3 ports or the ring lock held for SLI4 ports.
  3291. * This function returns the command iocb object if it finds the command
  3292. * iocb else returns NULL.
  3293. **/
  3294. static struct lpfc_iocbq *
  3295. lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
  3296. struct lpfc_sli_ring *pring,
  3297. struct lpfc_iocbq *prspiocb)
  3298. {
  3299. struct lpfc_iocbq *cmd_iocb = NULL;
  3300. u16 iotag;
  3301. if (phba->sli_rev == LPFC_SLI_REV4)
  3302. iotag = get_wqe_reqtag(prspiocb);
  3303. else
  3304. iotag = prspiocb->iocb.ulpIoTag;
  3305. if (iotag != 0 && iotag <= phba->sli.last_iotag) {
  3306. cmd_iocb = phba->sli.iocbq_lookup[iotag];
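/*
 * Only hand the command back if it is still being tracked on the
 * txcmplq; anything else falls through to the error log below.
 */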
  3307. if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
  3308. /* remove from txcmpl queue list */
  3309. list_del_init(&cmd_iocb->list);
  3310. cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
  3311. pring->txcmplq_cnt--;
  3312. return cmd_iocb;
  3313. }
  3314. }
  3315. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  3316. "0317 iotag x%x is out of "
  3317. "range: max iotag x%x\n",
  3318. iotag, phba->sli.last_iotag);
  3319. return NULL;
  3320. }
  3321. /**
  3322. * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
  3323. * @phba: Pointer to HBA context object.
  3324. * @pring: Pointer to driver SLI ring object.
  3325. * @iotag: IOCB tag.
  3326. *
  3327. * This function looks up the iocb_lookup table to get the command iocb
  3328. * corresponding to the given iotag. The driver calls this function with
  3329. * the ring lock held because this function is an SLI4 port only helper.
  3330. * This function returns the command iocb object if it finds the command
  3331. * iocb else returns NULL.
  3332. **/
  3333. static struct lpfc_iocbq *
  3334. lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
  3335. struct lpfc_sli_ring *pring, uint16_t iotag)
  3336. {
  3337. struct lpfc_iocbq *cmd_iocb = NULL;
  3338. if (iotag != 0 && iotag <= phba->sli.last_iotag) {
  3339. cmd_iocb = phba->sli.iocbq_lookup[iotag];
  3340. if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
  3341. /* remove from txcmpl queue list */
  3342. list_del_init(&cmd_iocb->list);
  3343. cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
  3344. pring->txcmplq_cnt--;
  3345. return cmd_iocb;
  3346. }
  3347. }
  3348. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  3349. "0372 iotag x%x lookup error: max iotag (x%x) "
  3350. "cmd_flag x%x\n",
  3351. iotag, phba->sli.last_iotag,
  3352. cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
  3353. return NULL;
  3354. }
  3355. /**
  3356. * lpfc_sli_process_sol_iocb - process solicited iocb completion
  3357. * @phba: Pointer to HBA context object.
  3358. * @pring: Pointer to driver SLI ring object.
  3359. * @saveq: Pointer to the response iocb to be processed.
  3360. *
  3361. * This function is called by the ring event handler for non-fcp
  3362. * rings when there is a new response iocb in the response ring.
  3363. * The caller is not required to hold any locks. This function
  3364. * gets the command iocb associated with the response iocb and
  3365. * calls the completion handler for the command iocb. If there
  3366. * is no completion handler, the function will free the resources
  3367. * associated with command iocb. If the response iocb is for
  3368. * an already aborted command iocb, the status of the completion
  3369. * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
  3370. * This function always returns 1.
  3371. **/
  3372. static int
  3373. lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  3374. struct lpfc_iocbq *saveq)
  3375. {
  3376. struct lpfc_iocbq *cmdiocbp;
  3377. unsigned long iflag;
  3378. u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
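/* SLI4 protects the iocbq lookup with the per-ring lock; SLI3 uses the hbalock */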
  3379. if (phba->sli_rev == LPFC_SLI_REV4)
  3380. spin_lock_irqsave(&pring->ring_lock, iflag);
  3381. else
  3382. spin_lock_irqsave(&phba->hbalock, iflag);
  3383. cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
  3384. if (phba->sli_rev == LPFC_SLI_REV4)
  3385. spin_unlock_irqrestore(&pring->ring_lock, iflag);
  3386. else
  3387. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3388. ulp_command = get_job_cmnd(phba, saveq);
  3389. ulp_status = get_job_ulpstatus(phba, saveq);
  3390. ulp_word4 = get_job_word4(phba, saveq);
  3391. ulp_context = get_job_ulpcontext(phba, saveq);
  3392. if (phba->sli_rev == LPFC_SLI_REV4)
  3393. iotag = get_wqe_reqtag(saveq);
  3394. else
  3395. iotag = saveq->iocb.ulpIoTag;
  3396. if (cmdiocbp) {
  3397. ulp_command = get_job_cmnd(phba, cmdiocbp);
  3398. if (cmdiocbp->cmd_cmpl) {
  3399. /*
  3400. * If an ELS command failed send an event to mgmt
  3401. * application.
  3402. */
  3403. if (ulp_status &&
  3404. (pring->ringno == LPFC_ELS_RING) &&
  3405. (ulp_command == CMD_ELS_REQUEST64_CR))
  3406. lpfc_send_els_failure_event(phba,
  3407. cmdiocbp, saveq);
  3408. /*
  3409. * Post all ELS completions to the worker thread.
  3410. * All other are passed to the completion callback.
  3411. */
  3412. if (pring->ringno == LPFC_ELS_RING) {
  3413. if ((phba->sli_rev < LPFC_SLI_REV4) &&
  3414. (cmdiocbp->cmd_flag &
  3415. LPFC_DRIVER_ABORTED)) {
  3416. spin_lock_irqsave(&phba->hbalock,
  3417. iflag);
  3418. cmdiocbp->cmd_flag &=
  3419. ~LPFC_DRIVER_ABORTED;
  3420. spin_unlock_irqrestore(&phba->hbalock,
  3421. iflag);
  3422. saveq->iocb.ulpStatus =
  3423. IOSTAT_LOCAL_REJECT;
  3424. saveq->iocb.un.ulpWord[4] =
  3425. IOERR_SLI_ABORTED;
  3426. /* Firmware could still be in progress
  3427. * of DMAing payload, so don't free data
  3428. * buffer till after a hbeat.
  3429. */
  3430. spin_lock_irqsave(&phba->hbalock,
  3431. iflag);
  3432. saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
  3433. spin_unlock_irqrestore(&phba->hbalock,
  3434. iflag);
  3435. }
  3436. if (phba->sli_rev == LPFC_SLI_REV4) {
  3437. if (saveq->cmd_flag &
  3438. LPFC_EXCHANGE_BUSY) {
  3439. /* Set cmdiocb flag for the
  3440. * exchange busy so sgl (xri)
  3441. * will not be released until
  3442. * the abort xri is received
  3443. * from hba.
  3444. */
  3445. spin_lock_irqsave(
  3446. &phba->hbalock, iflag);
  3447. cmdiocbp->cmd_flag |=
  3448. LPFC_EXCHANGE_BUSY;
  3449. spin_unlock_irqrestore(
  3450. &phba->hbalock, iflag);
  3451. }
  3452. if (cmdiocbp->cmd_flag &
  3453. LPFC_DRIVER_ABORTED) {
  3454. /*
  3455. * Clear LPFC_DRIVER_ABORTED
  3456. * bit in case it was driver
  3457. * initiated abort.
  3458. */
  3459. spin_lock_irqsave(
  3460. &phba->hbalock, iflag);
  3461. cmdiocbp->cmd_flag &=
  3462. ~LPFC_DRIVER_ABORTED;
  3463. spin_unlock_irqrestore(
  3464. &phba->hbalock, iflag);
  3465. set_job_ulpstatus(cmdiocbp,
  3466. IOSTAT_LOCAL_REJECT);
  3467. set_job_ulpword4(cmdiocbp,
  3468. IOERR_ABORT_REQUESTED);
  3469. /*
  3470. * For SLI4, irspiocb contains
  3471. * NO_XRI in sli_xritag, it
  3472. * shall not affect releasing
  3473. * sgl (xri) process.
  3474. */
  3475. set_job_ulpstatus(saveq,
  3476. IOSTAT_LOCAL_REJECT);
  3477. set_job_ulpword4(saveq,
  3478. IOERR_SLI_ABORTED);
  3479. spin_lock_irqsave(
  3480. &phba->hbalock, iflag);
  3481. saveq->cmd_flag |=
  3482. LPFC_DELAY_MEM_FREE;
  3483. spin_unlock_irqrestore(
  3484. &phba->hbalock, iflag);
  3485. }
  3486. }
  3487. }
  3488. cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
  3489. } else
  3490. lpfc_sli_release_iocbq(phba, cmdiocbp);
  3491. } else {
  3492. /*
  3493. * Unknown initiating command based on the response iotag.
  3494. * This could be the case on the ELS ring because of
  3495. * lpfc_els_abort().
  3496. */
  3497. if (pring->ringno != LPFC_ELS_RING) {
  3498. /*
  3499. * Ring <ringno> handler: unexpected completion IoTag
  3500. * <IoTag>
  3501. */
  3502. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  3503. "0322 Ring %d handler: "
  3504. "unexpected completion IoTag x%x "
  3505. "Data: x%x x%x x%x x%x\n",
  3506. pring->ringno, iotag, ulp_status,
  3507. ulp_word4, ulp_command, ulp_context);
  3508. }
  3509. }
  3510. return 1;
  3511. }
  3512. /**
  3513. * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
  3514. * @phba: Pointer to HBA context object.
  3515. * @pring: Pointer to driver SLI ring object.
  3516. *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread, and the worker
 * thread will transition the HBA to offline state.
  3521. **/
  3522. static void
  3523. lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  3524. {
  3525. struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
  3526. /*
  3527. * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
  3528. * rsp ring <portRspMax>
  3529. */
  3530. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  3531. "0312 Ring %d handler: portRspPut %d "
  3532. "is bigger than rsp ring %d\n",
  3533. pring->ringno, le32_to_cpu(pgp->rspPutInx),
  3534. pring->sli.sli3.numRiocb);
  3535. phba->link_state = LPFC_HBA_ERROR;
  3536. /*
  3537. * All error attention handlers are posted to
  3538. * worker thread
  3539. */
  3540. phba->work_ha |= HA_ERATT;
  3541. phba->work_hs = HS_FFER3;
  3542. lpfc_worker_wake_up(phba);
  3543. return;
  3544. }
  3545. /**
  3546. * lpfc_poll_eratt - Error attention polling timer timeout handler
  3547. * @t: Context to fetch pointer to address of HBA context object from.
  3548. *
  3549. * This function is invoked by the Error Attention polling timer when the
  3550. * timer times out. It will check the SLI Error Attention register for
  3551. * possible attention events. If so, it will post an Error Attention event
  3552. * and wake up worker thread to process it. Otherwise, it will set up the
  3553. * Error Attention polling timer for the next poll.
  3554. **/
  3555. void lpfc_poll_eratt(struct timer_list *t)
  3556. {
  3557. struct lpfc_hba *phba;
  3558. uint32_t eratt = 0;
  3559. uint64_t sli_intr, cnt;
  3560. phba = from_timer(phba, t, eratt_poll);
  3561. if (!test_bit(HBA_SETUP, &phba->hba_flag))
  3562. return;
  3563. if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
  3564. return;
  3565. /* Here we will also keep track of interrupts per sec of the hba */
  3566. sli_intr = phba->sli.slistat.sli_intr;
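/* Account for a wrap of the 64-bit interrupt counter since the previous poll */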
  3567. if (phba->sli.slistat.sli_prev_intr > sli_intr)
  3568. cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
  3569. sli_intr);
  3570. else
  3571. cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
  3572. /* 64-bit integer division not supported on 32-bit x86 - use do_div */
  3573. do_div(cnt, phba->eratt_poll_interval);
  3574. phba->sli.slistat.sli_ips = cnt;
  3575. phba->sli.slistat.sli_prev_intr = sli_intr;
  3576. /* Check chip HA register for error event */
  3577. eratt = lpfc_sli_check_eratt(phba);
  3578. if (eratt)
  3579. /* Tell the worker thread there is work to do */
  3580. lpfc_worker_wake_up(phba);
  3581. else
  3582. /* Restart the timer for next eratt poll */
  3583. mod_timer(&phba->eratt_poll,
  3584. jiffies +
  3585. msecs_to_jiffies(1000 * phba->eratt_poll_interval));
  3586. return;
  3587. }
  3588. /**
  3589. * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
  3590. * @phba: Pointer to HBA context object.
  3591. * @pring: Pointer to driver SLI ring object.
  3592. * @mask: Host attention register mask for this ring.
  3593. *
  3594. * This function is called from the interrupt context when there is a ring
  3595. * event for the fcp ring. The caller does not hold any lock.
  3596. * The function processes each response iocb in the response ring until it
  3597. * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
  3598. * LE bit set. The function will call the completion handler of the command iocb
  3599. * if the response iocb indicates a completion for a command iocb or it is
  3600. * an abort completion. The function will call lpfc_sli_process_unsol_iocb
  3601. * function if this is an unsolicited iocb.
  3602. * This routine presumes LPFC_FCP_RING handling and doesn't bother
  3603. * to check it explicitly.
  3604. */
  3605. int
  3606. lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
  3607. struct lpfc_sli_ring *pring, uint32_t mask)
  3608. {
  3609. struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
  3610. IOCB_t *irsp = NULL;
  3611. IOCB_t *entry = NULL;
  3612. struct lpfc_iocbq *cmdiocbq = NULL;
  3613. struct lpfc_iocbq rspiocbq;
  3614. uint32_t status;
  3615. uint32_t portRspPut, portRspMax;
  3616. int rc = 1;
  3617. lpfc_iocb_type type;
  3618. unsigned long iflag;
  3619. uint32_t rsp_cmpl = 0;
  3620. spin_lock_irqsave(&phba->hbalock, iflag);
  3621. pring->stats.iocb_event++;
  3622. /*
  3623. * The next available response entry should never exceed the maximum
  3624. * entries. If it does, treat it as an adapter hardware error.
  3625. */
  3626. portRspMax = pring->sli.sli3.numRiocb;
  3627. portRspPut = le32_to_cpu(pgp->rspPutInx);
  3628. if (unlikely(portRspPut >= portRspMax)) {
  3629. lpfc_sli_rsp_pointers_error(phba, pring);
  3630. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3631. return 1;
  3632. }
  3633. if (phba->fcp_ring_in_use) {
  3634. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3635. return 1;
  3636. } else
  3637. phba->fcp_ring_in_use = 1;
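/* Make sure the response entries are read only after the put index fetched above */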
  3638. rmb();
  3639. while (pring->sli.sli3.rspidx != portRspPut) {
  3640. /*
  3641. * Fetch an entry off the ring and copy it into a local data
  3642. * structure. The copy involves a byte-swap since the
  3643. * network byte order and pci byte orders are different.
  3644. */
  3645. entry = lpfc_resp_iocb(phba, pring);
  3646. phba->last_completion_time = jiffies;
  3647. if (++pring->sli.sli3.rspidx >= portRspMax)
  3648. pring->sli.sli3.rspidx = 0;
  3649. lpfc_sli_pcimem_bcopy((uint32_t *) entry,
  3650. (uint32_t *) &rspiocbq.iocb,
  3651. phba->iocb_rsp_size);
  3652. INIT_LIST_HEAD(&(rspiocbq.list));
  3653. irsp = &rspiocbq.iocb;
  3654. type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
  3655. pring->stats.iocb_rsp++;
  3656. rsp_cmpl++;
  3657. if (unlikely(irsp->ulpStatus)) {
  3658. /*
  3659. * If resource errors reported from HBA, reduce
  3660. * queuedepths of the SCSI device.
  3661. */
  3662. if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
  3663. ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
  3664. IOERR_NO_RESOURCES)) {
  3665. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3666. phba->lpfc_rampdown_queue_depth(phba);
  3667. spin_lock_irqsave(&phba->hbalock, iflag);
  3668. }
  3669. /* Rsp ring <ringno> error: IOCB */
  3670. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  3671. "0336 Rsp Ring %d error: IOCB Data: "
  3672. "x%x x%x x%x x%x x%x x%x x%x x%x\n",
  3673. pring->ringno,
  3674. irsp->un.ulpWord[0],
  3675. irsp->un.ulpWord[1],
  3676. irsp->un.ulpWord[2],
  3677. irsp->un.ulpWord[3],
  3678. irsp->un.ulpWord[4],
  3679. irsp->un.ulpWord[5],
  3680. *(uint32_t *)&irsp->un1,
  3681. *((uint32_t *)&irsp->un1 + 1));
  3682. }
  3683. switch (type) {
  3684. case LPFC_ABORT_IOCB:
  3685. case LPFC_SOL_IOCB:
  3686. /*
  3687. * Idle exchange closed via ABTS from port. No iocb
  3688. * resources need to be recovered.
  3689. */
  3690. if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
  3691. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3692. "0333 IOCB cmd 0x%x"
  3693. " processed. Skipping"
  3694. " completion\n",
  3695. irsp->ulpCommand);
  3696. break;
  3697. }
  3698. cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
  3699. &rspiocbq);
  3700. if (unlikely(!cmdiocbq))
  3701. break;
  3702. if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
  3703. cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  3704. if (cmdiocbq->cmd_cmpl) {
  3705. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3706. cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
  3707. spin_lock_irqsave(&phba->hbalock, iflag);
  3708. }
  3709. break;
  3710. case LPFC_UNSOL_IOCB:
  3711. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3712. lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
  3713. spin_lock_irqsave(&phba->hbalock, iflag);
  3714. break;
  3715. default:
  3716. if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
  3717. char adaptermsg[LPFC_MAX_ADPTMSG];
  3718. memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
  3719. memcpy(&adaptermsg[0], (uint8_t *) irsp,
  3720. MAX_MSG_DATA);
  3721. dev_warn(&((phba->pcidev)->dev),
  3722. "lpfc%d: %s\n",
  3723. phba->brd_no, adaptermsg);
  3724. } else {
  3725. /* Unknown IOCB command */
  3726. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  3727. "0334 Unknown IOCB command "
  3728. "Data: x%x, x%x x%x x%x x%x\n",
  3729. type, irsp->ulpCommand,
  3730. irsp->ulpStatus,
  3731. irsp->ulpIoTag,
  3732. irsp->ulpContext);
  3733. }
  3734. break;
  3735. }
  3736. /*
  3737. * The response IOCB has been processed. Update the ring
  3738. * pointer in SLIM. If the port response put pointer has not
  3739. * been updated, sync the pgp->rspPutInx and fetch the new port
  3740. * response put pointer.
  3741. */
  3742. writel(pring->sli.sli3.rspidx,
  3743. &phba->host_gp[pring->ringno].rspGetInx);
  3744. if (pring->sli.sli3.rspidx == portRspPut)
  3745. portRspPut = le32_to_cpu(pgp->rspPutInx);
  3746. }
  3747. if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
  3748. pring->stats.iocb_rsp_full++;
  3749. status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
  3750. writel(status, phba->CAregaddr);
  3751. readl(phba->CAregaddr);
  3752. }
  3753. if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
  3754. pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
  3755. pring->stats.iocb_cmd_empty++;
  3756. /* Force update of the local copy of cmdGetInx */
  3757. pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
  3758. lpfc_sli_resume_iocb(phba, pring);
  3759. if ((pring->lpfc_sli_cmd_available))
  3760. (pring->lpfc_sli_cmd_available) (phba, pring);
  3761. }
  3762. phba->fcp_ring_in_use = 0;
  3763. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3764. return rc;
  3765. }
  3766. /**
  3767. * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
  3768. * @phba: Pointer to HBA context object.
  3769. * @pring: Pointer to driver SLI ring object.
  3770. * @rspiocbp: Pointer to driver response IOCB object.
  3771. *
  3772. * This function is called from the worker thread when there is a slow-path
  3773. * response IOCB to process. This function chains all the response iocbs until
  3774. * seeing the iocb with the LE bit set. The function will call
  3775. * lpfc_sli_process_sol_iocb function if the response iocb indicates a
  3776. * completion of a command iocb. The function will call the
  3777. * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
  3778. * The function frees the resources or calls the completion handler if this
  3779. * iocb is an abort completion. The function returns NULL when the response
  3780. * iocb has the LE bit set and all the chained iocbs are processed, otherwise
  3781. * this function shall chain the iocb on to the iocb_continueq and return the
  3782. * response iocb passed in.
  3783. **/
  3784. static struct lpfc_iocbq *
  3785. lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  3786. struct lpfc_iocbq *rspiocbp)
  3787. {
  3788. struct lpfc_iocbq *saveq;
  3789. struct lpfc_iocbq *cmdiocb;
  3790. struct lpfc_iocbq *next_iocb;
  3791. IOCB_t *irsp;
  3792. uint32_t free_saveq;
  3793. u8 cmd_type;
  3794. lpfc_iocb_type type;
  3795. unsigned long iflag;
  3796. u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
  3797. u32 ulp_word4 = get_job_word4(phba, rspiocbp);
  3798. u32 ulp_command = get_job_cmnd(phba, rspiocbp);
  3799. int rc;
  3800. spin_lock_irqsave(&phba->hbalock, iflag);
/* First add the response iocb to the continueq list */
  3802. list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
  3803. pring->iocb_continueq_cnt++;
  3804. /*
  3805. * By default, the driver expects to free all resources
  3806. * associated with this iocb completion.
  3807. */
  3808. free_saveq = 1;
  3809. saveq = list_get_first(&pring->iocb_continueq,
  3810. struct lpfc_iocbq, list);
  3811. list_del_init(&pring->iocb_continueq);
  3812. pring->iocb_continueq_cnt = 0;
  3813. pring->stats.iocb_rsp++;
  3814. /*
  3815. * If resource errors reported from HBA, reduce
  3816. * queuedepths of the SCSI device.
  3817. */
  3818. if (ulp_status == IOSTAT_LOCAL_REJECT &&
  3819. ((ulp_word4 & IOERR_PARAM_MASK) ==
  3820. IOERR_NO_RESOURCES)) {
  3821. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3822. phba->lpfc_rampdown_queue_depth(phba);
  3823. spin_lock_irqsave(&phba->hbalock, iflag);
  3824. }
  3825. if (ulp_status) {
  3826. /* Rsp ring <ringno> error: IOCB */
  3827. if (phba->sli_rev < LPFC_SLI_REV4) {
  3828. irsp = &rspiocbp->iocb;
  3829. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  3830. "0328 Rsp Ring %d error: ulp_status x%x "
  3831. "IOCB Data: "
  3832. "x%08x x%08x x%08x x%08x "
  3833. "x%08x x%08x x%08x x%08x "
  3834. "x%08x x%08x x%08x x%08x "
  3835. "x%08x x%08x x%08x x%08x\n",
  3836. pring->ringno, ulp_status,
  3837. get_job_ulpword(rspiocbp, 0),
  3838. get_job_ulpword(rspiocbp, 1),
  3839. get_job_ulpword(rspiocbp, 2),
  3840. get_job_ulpword(rspiocbp, 3),
  3841. get_job_ulpword(rspiocbp, 4),
  3842. get_job_ulpword(rspiocbp, 5),
  3843. *(((uint32_t *)irsp) + 6),
  3844. *(((uint32_t *)irsp) + 7),
  3845. *(((uint32_t *)irsp) + 8),
  3846. *(((uint32_t *)irsp) + 9),
  3847. *(((uint32_t *)irsp) + 10),
  3848. *(((uint32_t *)irsp) + 11),
  3849. *(((uint32_t *)irsp) + 12),
  3850. *(((uint32_t *)irsp) + 13),
  3851. *(((uint32_t *)irsp) + 14),
  3852. *(((uint32_t *)irsp) + 15));
  3853. } else {
  3854. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  3855. "0321 Rsp Ring %d error: "
  3856. "IOCB Data: "
  3857. "x%x x%x x%x x%x\n",
  3858. pring->ringno,
  3859. rspiocbp->wcqe_cmpl.word0,
  3860. rspiocbp->wcqe_cmpl.total_data_placed,
  3861. rspiocbp->wcqe_cmpl.parameter,
  3862. rspiocbp->wcqe_cmpl.word3);
  3863. }
  3864. }
  3865. /*
  3866. * Fetch the iocb command type and call the correct completion
  3867. * routine. Solicited and Unsolicited IOCBs on the ELS ring
  3868. * get freed back to the lpfc_iocb_list by the discovery
  3869. * kernel thread.
  3870. */
  3871. cmd_type = ulp_command & CMD_IOCB_MASK;
  3872. type = lpfc_sli_iocb_cmd_type(cmd_type);
  3873. switch (type) {
  3874. case LPFC_SOL_IOCB:
  3875. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3876. rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
  3877. spin_lock_irqsave(&phba->hbalock, iflag);
  3878. break;
  3879. case LPFC_UNSOL_IOCB:
  3880. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3881. rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
  3882. spin_lock_irqsave(&phba->hbalock, iflag);
  3883. if (!rc)
  3884. free_saveq = 0;
  3885. break;
  3886. case LPFC_ABORT_IOCB:
  3887. cmdiocb = NULL;
  3888. if (ulp_command != CMD_XRI_ABORTED_CX)
  3889. cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
  3890. saveq);
  3891. if (cmdiocb) {
  3892. /* Call the specified completion routine */
  3893. if (cmdiocb->cmd_cmpl) {
  3894. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3895. cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
  3896. spin_lock_irqsave(&phba->hbalock, iflag);
  3897. } else {
  3898. __lpfc_sli_release_iocbq(phba, cmdiocb);
  3899. }
  3900. }
  3901. break;
  3902. case LPFC_UNKNOWN_IOCB:
  3903. if (ulp_command == CMD_ADAPTER_MSG) {
  3904. char adaptermsg[LPFC_MAX_ADPTMSG];
  3905. memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
  3906. memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
  3907. MAX_MSG_DATA);
  3908. dev_warn(&((phba->pcidev)->dev),
  3909. "lpfc%d: %s\n",
  3910. phba->brd_no, adaptermsg);
  3911. } else {
  3912. /* Unknown command */
  3913. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  3914. "0335 Unknown IOCB "
  3915. "command Data: x%x "
  3916. "x%x x%x x%x\n",
  3917. ulp_command,
  3918. ulp_status,
  3919. get_wqe_reqtag(rspiocbp),
  3920. get_job_ulpcontext(phba, rspiocbp));
  3921. }
  3922. break;
  3923. }
  3924. if (free_saveq) {
  3925. list_for_each_entry_safe(rspiocbp, next_iocb,
  3926. &saveq->list, list) {
  3927. list_del_init(&rspiocbp->list);
  3928. __lpfc_sli_release_iocbq(phba, rspiocbp);
  3929. }
  3930. __lpfc_sli_release_iocbq(phba, saveq);
  3931. }
  3932. rspiocbp = NULL;
  3933. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3934. return rspiocbp;
  3935. }
  3936. /**
  3937. * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
  3938. * @phba: Pointer to HBA context object.
  3939. * @pring: Pointer to driver SLI ring object.
  3940. * @mask: Host attention register mask for this ring.
  3941. *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer in the lpfc_hba struct.
  3944. **/
  3945. void
  3946. lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
  3947. struct lpfc_sli_ring *pring, uint32_t mask)
  3948. {
  3949. phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
  3950. }
  3951. /**
  3952. * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
  3953. * @phba: Pointer to HBA context object.
  3954. * @pring: Pointer to driver SLI ring object.
  3955. * @mask: Host attention register mask for this ring.
  3956. *
  3957. * This function is called from the worker thread when there is a ring event
  3958. * for non-fcp rings. The caller does not hold any lock. The function will
  3959. * remove each response iocb in the response ring and calls the handle
  3960. * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
  3961. **/
  3962. static void
  3963. lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
  3964. struct lpfc_sli_ring *pring, uint32_t mask)
  3965. {
  3966. struct lpfc_pgp *pgp;
  3967. IOCB_t *entry;
  3968. IOCB_t *irsp = NULL;
  3969. struct lpfc_iocbq *rspiocbp = NULL;
  3970. uint32_t portRspPut, portRspMax;
  3971. unsigned long iflag;
  3972. uint32_t status;
  3973. pgp = &phba->port_gp[pring->ringno];
  3974. spin_lock_irqsave(&phba->hbalock, iflag);
  3975. pring->stats.iocb_event++;
  3976. /*
  3977. * The next available response entry should never exceed the maximum
  3978. * entries. If it does, treat it as an adapter hardware error.
  3979. */
  3980. portRspMax = pring->sli.sli3.numRiocb;
  3981. portRspPut = le32_to_cpu(pgp->rspPutInx);
  3982. if (portRspPut >= portRspMax) {
  3983. /*
  3984. * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
  3985. * rsp ring <portRspMax>
  3986. */
  3987. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  3988. "0303 Ring %d handler: portRspPut %d "
  3989. "is bigger than rsp ring %d\n",
  3990. pring->ringno, portRspPut, portRspMax);
  3991. phba->link_state = LPFC_HBA_ERROR;
  3992. spin_unlock_irqrestore(&phba->hbalock, iflag);
  3993. phba->work_hs = HS_FFER3;
  3994. lpfc_handle_eratt(phba);
  3995. return;
  3996. }
  3997. rmb();
  3998. while (pring->sli.sli3.rspidx != portRspPut) {
  3999. /*
  4000. * Build a completion list and call the appropriate handler.
  4001. * The process is to get the next available response iocb, get
  4002. * a free iocb from the list, copy the response data into the
  4003. * free iocb, insert to the continuation list, and update the
  4004. * next response index to slim. This process makes response
 * iocbs in the ring available to DMA as fast as possible but
  4006. * pays a penalty for a copy operation. Since the iocb is
  4007. * only 32 bytes, this penalty is considered small relative to
  4008. * the PCI reads for register values and a slim write. When
 * the ulpLe field is set, the entire command has been
  4010. * received.
  4011. */
  4012. entry = lpfc_resp_iocb(phba, pring);
  4013. phba->last_completion_time = jiffies;
  4014. rspiocbp = __lpfc_sli_get_iocbq(phba);
  4015. if (rspiocbp == NULL) {
  4016. printk(KERN_ERR "%s: out of buffers! Failing "
  4017. "completion.\n", __func__);
  4018. break;
  4019. }
  4020. lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
  4021. phba->iocb_rsp_size);
  4022. irsp = &rspiocbp->iocb;
  4023. if (++pring->sli.sli3.rspidx >= portRspMax)
  4024. pring->sli.sli3.rspidx = 0;
  4025. if (pring->ringno == LPFC_ELS_RING) {
  4026. lpfc_debugfs_slow_ring_trc(phba,
  4027. "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
  4028. *(((uint32_t *) irsp) + 4),
  4029. *(((uint32_t *) irsp) + 6),
  4030. *(((uint32_t *) irsp) + 7));
  4031. }
  4032. writel(pring->sli.sli3.rspidx,
  4033. &phba->host_gp[pring->ringno].rspGetInx);
  4034. spin_unlock_irqrestore(&phba->hbalock, iflag);
  4035. /* Handle the response IOCB */
  4036. rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
  4037. spin_lock_irqsave(&phba->hbalock, iflag);
  4038. /*
 * If the port response put pointer has not been updated, sync
 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
 * response put pointer.
  4042. */
  4043. if (pring->sli.sli3.rspidx == portRspPut) {
  4044. portRspPut = le32_to_cpu(pgp->rspPutInx);
  4045. }
  4046. } /* while (pring->sli.sli3.rspidx != portRspPut) */
  4047. if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
  4048. /* At least one response entry has been freed */
  4049. pring->stats.iocb_rsp_full++;
  4050. /* SET RxRE_RSP in Chip Att register */
  4051. status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
  4052. writel(status, phba->CAregaddr);
  4053. readl(phba->CAregaddr); /* flush */
  4054. }
  4055. if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
  4056. pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
  4057. pring->stats.iocb_cmd_empty++;
  4058. /* Force update of the local copy of cmdGetInx */
  4059. pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
  4060. lpfc_sli_resume_iocb(phba, pring);
  4061. if ((pring->lpfc_sli_cmd_available))
  4062. (pring->lpfc_sli_cmd_available) (phba, pring);
  4063. }
  4064. spin_unlock_irqrestore(&phba->hbalock, iflag);
  4065. return;
  4066. }
  4067. /**
  4068. * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
  4069. * @phba: Pointer to HBA context object.
  4070. * @pring: Pointer to driver SLI ring object.
  4071. * @mask: Host attention register mask for this ring.
  4072. *
  4073. * This function is called from the worker thread when there is a pending
  4074. * ELS response iocb on the driver internal slow-path response iocb worker
  4075. * queue. The caller does not hold any lock. The function will remove each
  4076. * response iocb from the response worker queue and calls the handle
  4077. * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
  4078. **/
  4079. static void
  4080. lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
  4081. struct lpfc_sli_ring *pring, uint32_t mask)
  4082. {
  4083. struct lpfc_iocbq *irspiocbq;
  4084. struct hbq_dmabuf *dmabuf;
  4085. struct lpfc_cq_event *cq_event;
  4086. unsigned long iflag;
  4087. int count = 0;
  4088. clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
  4089. while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
  4090. /* Get the response iocb from the head of work queue */
  4091. spin_lock_irqsave(&phba->hbalock, iflag);
  4092. list_remove_head(&phba->sli4_hba.sp_queue_event,
  4093. cq_event, struct lpfc_cq_event, list);
  4094. spin_unlock_irqrestore(&phba->hbalock, iflag);
  4095. switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
  4096. case CQE_CODE_COMPL_WQE:
  4097. irspiocbq = container_of(cq_event, struct lpfc_iocbq,
  4098. cq_event);
  4099. /* Translate ELS WCQE to response IOCBQ */
  4100. irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
  4101. irspiocbq);
  4102. if (irspiocbq)
  4103. lpfc_sli_sp_handle_rspiocb(phba, pring,
  4104. irspiocbq);
  4105. count++;
  4106. break;
  4107. case CQE_CODE_RECEIVE:
  4108. case CQE_CODE_RECEIVE_V1:
  4109. dmabuf = container_of(cq_event, struct hbq_dmabuf,
  4110. cq_event);
  4111. lpfc_sli4_handle_received_buffer(phba, dmabuf);
  4112. count++;
  4113. break;
  4114. default:
  4115. break;
  4116. }
  4117. /* Limit the number of events to 64 to avoid soft lockups */
  4118. if (count == 64)
  4119. break;
  4120. }
  4121. }
  4122. /**
  4123. * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
  4124. * @phba: Pointer to HBA context object.
  4125. * @pring: Pointer to driver SLI ring object.
  4126. *
  4127. * This function aborts all iocbs in the given ring and frees all the iocb
  4128. * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
  4130. * the return of this function. The caller is not required to hold any locks.
  4131. **/
  4132. void
  4133. lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
  4134. {
  4135. LIST_HEAD(tx_completions);
  4136. LIST_HEAD(txcmplq_completions);
  4137. struct lpfc_iocbq *iocb, *next_iocb;
  4138. int offline;
  4139. if (pring->ringno == LPFC_ELS_RING) {
  4140. lpfc_fabric_abort_hba(phba);
  4141. }
  4142. offline = pci_channel_offline(phba->pcidev);
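/*
 * If the PCI channel is offline the adapter cannot accept abort
 * requests, so the txcmplq entries are simply cancelled below instead
 * of being aborted individually.
 */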
  4143. /* Error everything on txq and txcmplq
  4144. * First do the txq.
  4145. */
  4146. if (phba->sli_rev >= LPFC_SLI_REV4) {
  4147. spin_lock_irq(&pring->ring_lock);
  4148. list_splice_init(&pring->txq, &tx_completions);
  4149. pring->txq_cnt = 0;
  4150. if (offline) {
  4151. list_splice_init(&pring->txcmplq,
  4152. &txcmplq_completions);
  4153. } else {
  4154. /* Next issue ABTS for everything on the txcmplq */
  4155. list_for_each_entry_safe(iocb, next_iocb,
  4156. &pring->txcmplq, list)
  4157. lpfc_sli_issue_abort_iotag(phba, pring,
  4158. iocb, NULL);
  4159. }
  4160. spin_unlock_irq(&pring->ring_lock);
  4161. } else {
  4162. spin_lock_irq(&phba->hbalock);
  4163. list_splice_init(&pring->txq, &tx_completions);
  4164. pring->txq_cnt = 0;
  4165. if (offline) {
  4166. list_splice_init(&pring->txcmplq, &txcmplq_completions);
  4167. } else {
  4168. /* Next issue ABTS for everything on the txcmplq */
  4169. list_for_each_entry_safe(iocb, next_iocb,
  4170. &pring->txcmplq, list)
  4171. lpfc_sli_issue_abort_iotag(phba, pring,
  4172. iocb, NULL);
  4173. }
  4174. spin_unlock_irq(&phba->hbalock);
  4175. }
  4176. if (offline) {
  4177. /* Cancel all the IOCBs from the completions list */
  4178. lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
  4179. IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
  4180. } else {
  4181. /* Make sure HBA is alive */
  4182. lpfc_issue_hb_tmo(phba);
  4183. }
  4184. /* Cancel all the IOCBs from the completions list */
  4185. lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
  4186. IOERR_SLI_ABORTED);
  4187. }
  4188. /**
  4189. * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
  4190. * @phba: Pointer to HBA context object.
  4191. *
  4192. * This function aborts all iocbs in FCP rings and frees all the iocb
  4193. * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
  4195. * the return of this function. The caller is not required to hold any locks.
  4196. **/
  4197. void
  4198. lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
  4199. {
  4200. struct lpfc_sli *psli = &phba->sli;
  4201. struct lpfc_sli_ring *pring;
  4202. uint32_t i;
  4203. /* Look on all the FCP Rings for the iotag */
  4204. if (phba->sli_rev >= LPFC_SLI_REV4) {
  4205. for (i = 0; i < phba->cfg_hdw_queue; i++) {
  4206. pring = phba->sli4_hba.hdwq[i].io_wq->pring;
  4207. lpfc_sli_abort_iocb_ring(phba, pring);
  4208. }
  4209. } else {
  4210. pring = &psli->sli3_ring[LPFC_FCP_RING];
  4211. lpfc_sli_abort_iocb_ring(phba, pring);
  4212. }
  4213. }
  4214. /**
  4215. * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
  4216. * @phba: Pointer to HBA context object.
  4217. *
  4218. * This function flushes all iocbs in the IO ring and frees all the iocb
  4219. * objects in txq and txcmplq. This function will not issue abort iocbs
 * for all the iocb commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
  4222. * slot has been permanently disabled.
  4223. **/
  4224. void
  4225. lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
  4226. {
  4227. LIST_HEAD(txq);
  4228. LIST_HEAD(txcmplq);
  4229. struct lpfc_sli *psli = &phba->sli;
  4230. struct lpfc_sli_ring *pring;
  4231. uint32_t i;
  4232. struct lpfc_iocbq *piocb, *next_iocb;
  4233. /* Indicate the I/O queues are flushed */
  4234. set_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
  4235. /* Look on all the FCP Rings for the iotag */
  4236. if (phba->sli_rev >= LPFC_SLI_REV4) {
  4237. for (i = 0; i < phba->cfg_hdw_queue; i++) {
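/* Bail out if the hardware queues have already been torn down */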
  4238. if (!phba->sli4_hba.hdwq ||
  4239. !phba->sli4_hba.hdwq[i].io_wq) {
  4240. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4241. "7777 hdwq's deleted %lx "
  4242. "%lx %x %x\n",
  4243. phba->pport->load_flag,
  4244. phba->hba_flag,
  4245. phba->link_state,
  4246. phba->sli.sli_flag);
  4247. return;
  4248. }
  4249. pring = phba->sli4_hba.hdwq[i].io_wq->pring;
  4250. spin_lock_irq(&pring->ring_lock);
  4251. /* Retrieve everything on txq */
  4252. list_splice_init(&pring->txq, &txq);
  4253. list_for_each_entry_safe(piocb, next_iocb,
  4254. &pring->txcmplq, list)
  4255. piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
  4256. /* Retrieve everything on the txcmplq */
  4257. list_splice_init(&pring->txcmplq, &txcmplq);
  4258. pring->txq_cnt = 0;
  4259. pring->txcmplq_cnt = 0;
  4260. spin_unlock_irq(&pring->ring_lock);
  4261. /* Flush the txq */
  4262. lpfc_sli_cancel_iocbs(phba, &txq,
  4263. IOSTAT_LOCAL_REJECT,
  4264. IOERR_SLI_DOWN);
  4265. /* Flush the txcmplq */
  4266. lpfc_sli_cancel_iocbs(phba, &txcmplq,
  4267. IOSTAT_LOCAL_REJECT,
  4268. IOERR_SLI_DOWN);
  4269. if (unlikely(pci_channel_offline(phba->pcidev)))
  4270. lpfc_sli4_io_xri_aborted(phba, NULL, 0);
  4271. }
  4272. } else {
  4273. pring = &psli->sli3_ring[LPFC_FCP_RING];
  4274. spin_lock_irq(&phba->hbalock);
  4275. /* Retrieve everything on txq */
  4276. list_splice_init(&pring->txq, &txq);
  4277. list_for_each_entry_safe(piocb, next_iocb,
  4278. &pring->txcmplq, list)
  4279. piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
  4280. /* Retrieve everything on the txcmplq */
  4281. list_splice_init(&pring->txcmplq, &txcmplq);
  4282. pring->txq_cnt = 0;
  4283. pring->txcmplq_cnt = 0;
  4284. spin_unlock_irq(&phba->hbalock);
  4285. /* Flush the txq */
  4286. lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
  4287. IOERR_SLI_DOWN);
  4288. /* Flush the txcmpq */
  4289. lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
  4290. IOERR_SLI_DOWN);
  4291. }
  4292. }
  4293. /**
  4294. * lpfc_sli_brdready_s3 - Check for sli3 host ready status
  4295. * @phba: Pointer to HBA context object.
  4296. * @mask: Bit mask to be checked.
  4297. *
  4298. * This function reads the host status register and compares
  4299. * with the provided bit mask to check if HBA completed
  4300. * the restart. This function will wait in a loop for the
  4301. * HBA to complete restart. If the HBA does not restart within
  4302. * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise it returns
 * zero.
  4305. **/
  4306. static int
  4307. lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
  4308. {
  4309. uint32_t status;
  4310. int i = 0;
  4311. int retval = 0;
  4312. /* Read the HBA Host Status Register */
  4313. if (lpfc_readl(phba->HSregaddr, &status))
  4314. return 1;
  4315. set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
  4316. /*
  4317. * Check status register every 100ms for 5 retries, then every
  4318. * 500ms for 5, then every 2.5 sec for 5, then reset board and
  4319. * every 2.5 sec for 4.
 * Break out of the loop if errors occurred during init.
  4321. */
  4322. while (((status & mask) != mask) &&
  4323. !(status & HS_FFERM) &&
  4324. i++ < 20) {
  4325. if (i <= 5)
  4326. msleep(10);
  4327. else if (i <= 10)
  4328. msleep(500);
  4329. else
  4330. msleep(2500);
  4331. if (i == 15) {
  4332. /* Do post */
  4333. phba->pport->port_state = LPFC_VPORT_UNKNOWN;
  4334. lpfc_sli_brdrestart(phba);
  4335. }
  4336. /* Read the HBA Host Status Register */
  4337. if (lpfc_readl(phba->HSregaddr, &status)) {
  4338. retval = 1;
  4339. break;
  4340. }
  4341. }
  4342. /* Check to see if any errors occurred during init */
  4343. if ((status & HS_FFERM) || (i >= 20)) {
  4344. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  4345. "2751 Adapter failed to restart, "
  4346. "status reg x%x, FW Data: A8 x%x AC x%x\n",
  4347. status,
  4348. readl(phba->MBslimaddr + 0xa8),
  4349. readl(phba->MBslimaddr + 0xac));
  4350. phba->link_state = LPFC_HBA_ERROR;
  4351. retval = 1;
  4352. }
  4353. return retval;
  4354. }
  4355. /**
  4356. * lpfc_sli_brdready_s4 - Check for sli4 host ready status
  4357. * @phba: Pointer to HBA context object.
  4358. * @mask: Bit mask to be checked.
  4359. *
 * This function checks the host status register to check if the HBA is
 * ready. This function will wait in a loop for the HBA to be ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to be ready,
 * otherwise it returns zero.
  4365. **/
  4366. static int
  4367. lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
  4368. {
  4369. uint32_t status;
  4370. int retval = 0;
  4371. /* Read the HBA Host Status Register */
  4372. status = lpfc_sli4_post_status_check(phba);
  4373. if (status) {
  4374. phba->pport->port_state = LPFC_VPORT_UNKNOWN;
  4375. lpfc_sli_brdrestart(phba);
  4376. status = lpfc_sli4_post_status_check(phba);
  4377. }
  4378. /* Check to see if any errors occurred during init */
  4379. if (status) {
  4380. phba->link_state = LPFC_HBA_ERROR;
  4381. retval = 1;
  4382. } else
  4383. phba->sli4_hba.intr_enable = 0;
  4384. clear_bit(HBA_SETUP, &phba->hba_flag);
  4385. return retval;
  4386. }
  4387. /**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
  4389. * @phba: Pointer to HBA context object.
  4390. * @mask: Bit mask to be checked.
  4391. *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer in the lpfc_hba struct.
  4394. **/
  4395. int
  4396. lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
  4397. {
  4398. return phba->lpfc_sli_brdready(phba, mask);
  4399. }
  4400. #define BARRIER_TEST_PATTERN (0xdeadbeef)
  4401. /**
  4402. * lpfc_reset_barrier - Make HBA ready for HBA reset
  4403. * @phba: Pointer to HBA context object.
  4404. *
  4405. * This function is called before resetting an HBA. This function is called
  4406. * with hbalock held and requests HBA to quiesce DMAs before a reset.
  4407. **/
  4408. void lpfc_reset_barrier(struct lpfc_hba *phba)
  4409. {
  4410. uint32_t __iomem *resp_buf;
  4411. uint32_t __iomem *mbox_buf;
  4412. volatile struct MAILBOX_word0 mbox;
  4413. uint32_t hc_copy, ha_copy, resp_data;
  4414. int i;
  4415. uint8_t hdrtype;
  4416. lockdep_assert_held(&phba->hbalock);
  4417. pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
  4418. if (hdrtype != PCI_HEADER_TYPE_MFD ||
  4419. (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
  4420. FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
  4421. return;
  4422. /*
 * Tell the other part of the chip to temporarily suspend all
  4424. * its DMA activity.
  4425. */
  4426. resp_buf = phba->MBslimaddr;
  4427. /* Disable the error attention */
  4428. if (lpfc_readl(phba->HCregaddr, &hc_copy))
  4429. return;
  4430. writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
  4431. readl(phba->HCregaddr); /* flush */
  4432. phba->link_flag |= LS_IGNORE_ERATT;
  4433. if (lpfc_readl(phba->HAregaddr, &ha_copy))
  4434. return;
  4435. if (ha_copy & HA_ERATT) {
  4436. /* Clear Chip error bit */
  4437. writel(HA_ERATT, phba->HAregaddr);
  4438. phba->pport->stopped = 1;
  4439. }
  4440. mbox.word0 = 0;
  4441. mbox.mbxCommand = MBX_KILL_BOARD;
  4442. mbox.mbxOwner = OWN_CHIP;
  4443. writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
  4444. mbox_buf = phba->MBslimaddr;
  4445. writel(mbox.word0, mbox_buf);
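/*
 * Wait up to ~50ms for the chip to acknowledge the kill request by
 * writing the complement of the test pattern back into the mailbox
 * response area.
 */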
  4446. for (i = 0; i < 50; i++) {
  4447. if (lpfc_readl((resp_buf + 1), &resp_data))
  4448. return;
  4449. if (resp_data != ~(BARRIER_TEST_PATTERN))
  4450. mdelay(1);
  4451. else
  4452. break;
  4453. }
  4454. resp_data = 0;
  4455. if (lpfc_readl((resp_buf + 1), &resp_data))
  4456. return;
  4457. if (resp_data != ~(BARRIER_TEST_PATTERN)) {
  4458. if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
  4459. phba->pport->stopped)
  4460. goto restore_hc;
  4461. else
  4462. goto clear_errat;
  4463. }
  4464. mbox.mbxOwner = OWN_HOST;
  4465. resp_data = 0;
  4466. for (i = 0; i < 500; i++) {
  4467. if (lpfc_readl(resp_buf, &resp_data))
  4468. return;
  4469. if (resp_data != mbox.word0)
  4470. mdelay(1);
  4471. else
  4472. break;
  4473. }
  4474. clear_errat:
  4475. while (++i < 500) {
  4476. if (lpfc_readl(phba->HAregaddr, &ha_copy))
  4477. return;
  4478. if (!(ha_copy & HA_ERATT))
  4479. mdelay(1);
  4480. else
  4481. break;
  4482. }
  4483. if (readl(phba->HAregaddr) & HA_ERATT) {
  4484. writel(HA_ERATT, phba->HAregaddr);
  4485. phba->pport->stopped = 1;
  4486. }
  4487. restore_hc:
  4488. phba->link_flag &= ~LS_IGNORE_ERATT;
  4489. writel(hc_copy, phba->HCregaddr);
  4490. readl(phba->HCregaddr); /* flush */
  4491. }
  4492. /**
  4493. * lpfc_sli_brdkill - Issue a kill_board mailbox command
  4494. * @phba: Pointer to HBA context object.
  4495. *
  4496. * This function issues a kill_board mailbox command and waits for
  4497. * the error attention interrupt. This function is called for stopping
  4498. * the firmware processing. The caller is not required to hold any
  4499. * locks. This function calls lpfc_hba_down_post function to free
  4500. * any pending commands after the kill. The function will return 1 when it
  4501. * fails to kill the board else will return 0.
  4502. **/
  4503. int
  4504. lpfc_sli_brdkill(struct lpfc_hba *phba)
  4505. {
  4506. struct lpfc_sli *psli;
  4507. LPFC_MBOXQ_t *pmb;
  4508. uint32_t status;
  4509. uint32_t ha_copy;
  4510. int retval;
  4511. int i = 0;
  4512. psli = &phba->sli;
  4513. /* Kill HBA */
  4514. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4515. "0329 Kill HBA Data: x%x x%x\n",
  4516. phba->pport->port_state, psli->sli_flag);
  4517. pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  4518. if (!pmb)
  4519. return 1;
  4520. /* Disable the error attention */
  4521. spin_lock_irq(&phba->hbalock);
  4522. if (lpfc_readl(phba->HCregaddr, &status)) {
  4523. spin_unlock_irq(&phba->hbalock);
  4524. mempool_free(pmb, phba->mbox_mem_pool);
  4525. return 1;
  4526. }
  4527. status &= ~HC_ERINT_ENA;
  4528. writel(status, phba->HCregaddr);
  4529. readl(phba->HCregaddr); /* flush */
  4530. phba->link_flag |= LS_IGNORE_ERATT;
  4531. spin_unlock_irq(&phba->hbalock);
  4532. lpfc_kill_board(phba, pmb);
  4533. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  4534. retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  4535. if (retval != MBX_SUCCESS) {
  4536. if (retval != MBX_BUSY)
  4537. mempool_free(pmb, phba->mbox_mem_pool);
  4538. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  4539. "2752 KILL_BOARD command failed retval %d\n",
  4540. retval);
  4541. spin_lock_irq(&phba->hbalock);
  4542. phba->link_flag &= ~LS_IGNORE_ERATT;
  4543. spin_unlock_irq(&phba->hbalock);
  4544. return 1;
  4545. }
  4546. spin_lock_irq(&phba->hbalock);
  4547. psli->sli_flag &= ~LPFC_SLI_ACTIVE;
  4548. spin_unlock_irq(&phba->hbalock);
  4549. mempool_free(pmb, phba->mbox_mem_pool);
  4550. /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
  4551. * attention every 100ms for 3 seconds. If we don't get ERATT after
  4552. * 3 seconds we still set HBA_ERROR state because the status of the
  4553. * board is now undefined.
  4554. */
  4555. if (lpfc_readl(phba->HAregaddr, &ha_copy))
  4556. return 1;
  4557. while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
  4558. mdelay(100);
  4559. if (lpfc_readl(phba->HAregaddr, &ha_copy))
  4560. return 1;
  4561. }
  4562. del_timer_sync(&psli->mbox_tmo);
  4563. if (ha_copy & HA_ERATT) {
  4564. writel(HA_ERATT, phba->HAregaddr);
  4565. phba->pport->stopped = 1;
  4566. }
  4567. spin_lock_irq(&phba->hbalock);
  4568. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  4569. psli->mbox_active = NULL;
  4570. phba->link_flag &= ~LS_IGNORE_ERATT;
  4571. spin_unlock_irq(&phba->hbalock);
  4572. lpfc_hba_down_post(phba);
  4573. phba->link_state = LPFC_HBA_ERROR;
  4574. return ha_copy & HA_ERATT ? 0 : 1;
  4575. }
  4576. /**
  4577. * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
  4578. * @phba: Pointer to HBA context object.
  4579. *
  4580. * This function resets the HBA by writing HC_INITFF to the control
  4581. * register. After the HBA resets, this function resets all the iocb ring
  4582. * indices. This function disables PCI layer parity checking during
  4583. * the reset.
  4584. * This function returns 0 always.
  4585. * The caller is not required to hold any locks.
  4586. **/
  4587. int
  4588. lpfc_sli_brdreset(struct lpfc_hba *phba)
  4589. {
  4590. struct lpfc_sli *psli;
  4591. struct lpfc_sli_ring *pring;
  4592. uint16_t cfg_value;
  4593. int i;
  4594. psli = &phba->sli;
  4595. /* Reset HBA */
  4596. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4597. "0325 Reset HBA Data: x%x x%x\n",
  4598. (phba->pport) ? phba->pport->port_state : 0,
  4599. psli->sli_flag);
  4600. /* perform board reset */
  4601. phba->fc_eventTag = 0;
  4602. phba->link_events = 0;
  4603. set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
  4604. if (phba->pport) {
  4605. phba->pport->fc_myDID = 0;
  4606. phba->pport->fc_prevDID = 0;
  4607. }
  4608. /* Turn off parity checking and serr during the physical reset */
  4609. if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
  4610. return -EIO;
  4611. pci_write_config_word(phba->pcidev, PCI_COMMAND,
  4612. (cfg_value &
  4613. ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
  4614. psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
  4615. /* Now toggle INITFF bit in the Host Control Register */
  4616. writel(HC_INITFF, phba->HCregaddr);
  4617. mdelay(1);
  4618. readl(phba->HCregaddr); /* flush */
  4619. writel(0, phba->HCregaddr);
  4620. readl(phba->HCregaddr); /* flush */
  4621. /* Restore PCI cmd register */
  4622. pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
  4623. /* Initialize relevant SLI info */
  4624. for (i = 0; i < psli->num_rings; i++) {
  4625. pring = &psli->sli3_ring[i];
  4626. pring->flag = 0;
  4627. pring->sli.sli3.rspidx = 0;
  4628. pring->sli.sli3.next_cmdidx = 0;
  4629. pring->sli.sli3.local_getidx = 0;
  4630. pring->sli.sli3.cmdidx = 0;
  4631. pring->missbufcnt = 0;
  4632. }
  4633. phba->link_state = LPFC_WARM_START;
  4634. return 0;
  4635. }
  4636. /**
  4637. * lpfc_sli4_brdreset - Reset a sli-4 HBA
  4638. * @phba: Pointer to HBA context object.
  4639. *
4640. * This function resets a SLI4 HBA. It disables PCI layer parity
4641. * checking while resetting the device. The caller is not required to hold
4642. * any locks.
  4643. *
  4644. * This function returns 0 on success else returns negative error code.
  4645. **/
  4646. int
  4647. lpfc_sli4_brdreset(struct lpfc_hba *phba)
  4648. {
  4649. struct lpfc_sli *psli = &phba->sli;
  4650. uint16_t cfg_value;
  4651. int rc = 0;
  4652. /* Reset HBA */
  4653. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4654. "0295 Reset HBA Data: x%x x%x x%lx\n",
  4655. phba->pport->port_state, psli->sli_flag,
  4656. phba->hba_flag);
  4657. /* perform board reset */
  4658. phba->fc_eventTag = 0;
  4659. phba->link_events = 0;
  4660. phba->pport->fc_myDID = 0;
  4661. phba->pport->fc_prevDID = 0;
  4662. clear_bit(HBA_SETUP, &phba->hba_flag);
  4663. spin_lock_irq(&phba->hbalock);
  4664. psli->sli_flag &= ~(LPFC_PROCESS_LA);
  4665. phba->fcf.fcf_flag = 0;
  4666. spin_unlock_irq(&phba->hbalock);
  4667. /* Now physically reset the device */
  4668. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  4669. "0389 Performing PCI function reset!\n");
  4670. /* Turn off parity checking and serr during the physical reset */
  4671. if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
  4672. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  4673. "3205 PCI read Config failed\n");
  4674. return -EIO;
  4675. }
  4676. pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
  4677. ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
  4678. /* Perform FCoE PCI function reset before freeing queue memory */
  4679. rc = lpfc_pci_function_reset(phba);
  4680. /* Restore PCI cmd register */
  4681. pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
  4682. return rc;
  4683. }
  4684. /**
  4685. * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
  4686. * @phba: Pointer to HBA context object.
  4687. *
  4688. * This function is called in the SLI initialization code path to
  4689. * restart the HBA. The caller is not required to hold any lock.
  4690. * This function writes MBX_RESTART mailbox command to the SLIM and
  4691. * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
  4692. * function to free any pending commands. The function enables
4693. * POST only during the first initialization. The function returns zero
4694. * and does not guarantee that the MBX_RESTART mailbox command has
4695. * completed before it returns.
  4696. **/
  4697. static int
  4698. lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
  4699. {
  4700. volatile struct MAILBOX_word0 mb;
  4701. struct lpfc_sli *psli;
  4702. void __iomem *to_slim;
  4703. spin_lock_irq(&phba->hbalock);
  4704. psli = &phba->sli;
  4705. /* Restart HBA */
  4706. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4707. "0337 Restart HBA Data: x%x x%x\n",
  4708. (phba->pport) ? phba->pport->port_state : 0,
  4709. psli->sli_flag);
  4710. mb.word0 = 0;
  4711. mb.mbxCommand = MBX_RESTART;
  4712. mb.mbxHc = 1;
  4713. lpfc_reset_barrier(phba);
  4714. to_slim = phba->MBslimaddr;
  4715. writel(mb.word0, to_slim);
  4716. readl(to_slim); /* flush */
  4717. /* Only skip post after fc_ffinit is completed */
  4718. if (phba->pport && phba->pport->port_state)
  4719. mb.word0 = 1; /* This is really setting up word1 */
  4720. else
  4721. mb.word0 = 0; /* This is really setting up word1 */
  4722. to_slim = phba->MBslimaddr + sizeof (uint32_t);
  4723. writel(mb.word0, to_slim);
  4724. readl(to_slim); /* flush */
  4725. lpfc_sli_brdreset(phba);
  4726. if (phba->pport)
  4727. phba->pport->stopped = 0;
  4728. phba->link_state = LPFC_INIT_START;
  4729. phba->hba_flag = 0;
  4730. spin_unlock_irq(&phba->hbalock);
  4731. memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
  4732. psli->stats_start = ktime_get_seconds();
  4733. /* Give the INITFF and Post time to settle. */
  4734. mdelay(100);
  4735. lpfc_hba_down_post(phba);
  4736. return 0;
  4737. }
  4738. /**
  4739. * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
  4740. * @phba: Pointer to HBA context object.
  4741. *
  4742. * This function is called in the SLI initialization code path to restart
  4743. * a SLI4 HBA. The caller is not required to hold any lock.
  4744. * At the end of the function, it calls lpfc_hba_down_post function to
  4745. * free any pending commands.
  4746. **/
  4747. static int
  4748. lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
  4749. {
  4750. struct lpfc_sli *psli = &phba->sli;
  4751. int rc;
  4752. /* Restart HBA */
  4753. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  4754. "0296 Restart HBA Data: x%x x%x\n",
  4755. phba->pport->port_state, psli->sli_flag);
  4756. lpfc_sli4_queue_unset(phba);
  4757. rc = lpfc_sli4_brdreset(phba);
  4758. if (rc) {
  4759. phba->link_state = LPFC_HBA_ERROR;
  4760. goto hba_down_queue;
  4761. }
  4762. spin_lock_irq(&phba->hbalock);
  4763. phba->pport->stopped = 0;
  4764. phba->link_state = LPFC_INIT_START;
  4765. phba->hba_flag = 0;
  4766. /* Preserve FA-PWWN expectation */
  4767. phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
  4768. spin_unlock_irq(&phba->hbalock);
  4769. memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
  4770. psli->stats_start = ktime_get_seconds();
  4771. hba_down_queue:
  4772. lpfc_hba_down_post(phba);
  4773. lpfc_sli4_queue_destroy(phba);
  4774. return rc;
  4775. }
  4776. /**
  4777. * lpfc_sli_brdrestart - Wrapper func for restarting hba
  4778. * @phba: Pointer to HBA context object.
  4779. *
  4780. * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
  4781. * API jump table function pointer from the lpfc_hba struct.
  4782. **/
  4783. int
  4784. lpfc_sli_brdrestart(struct lpfc_hba *phba)
  4785. {
  4786. return phba->lpfc_sli_brdrestart(phba);
  4787. }
  4788. /**
  4789. * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
  4790. * @phba: Pointer to HBA context object.
  4791. *
  4792. * This function is called after a HBA restart to wait for successful
  4793. * restart of the HBA. Successful restart of the HBA is indicated by
4794. * HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after 150
4795. * polling iterations, the function restarts the HBA again. The function
4796. * returns zero if the HBA successfully restarted, else a negative error code.
  4797. **/
  4798. int
  4799. lpfc_sli_chipset_init(struct lpfc_hba *phba)
  4800. {
  4801. uint32_t status, i = 0;
  4802. /* Read the HBA Host Status Register */
  4803. if (lpfc_readl(phba->HSregaddr, &status))
  4804. return -EIO;
  4805. /* Check status register to see what current state is */
  4806. i = 0;
  4807. while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4808. /* Check every 10ms for 10 retries, then every 100ms for 90
4809. * retries, then every 1 sec for 50 retries, for a total of
4810. * ~60 seconds before resetting the board again and checking
4811. * every 1 sec for another 50 retries. Allowing up to 60 seconds
4812. * before the board is ready is required for the Falcon FIPS
4813. * zeroization to complete; any board reset in between restarts
4814. * the zeroization and further delays board readiness.
4815. */
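/*
 * Worst-case wait implied by the schedule coded below (illustrative
 * arithmetic, not driver logic): 10 * 10ms + 90 * 100ms + 100 * 1s,
 * roughly 109 seconds in total, with one board restart triggered at
 * iteration 150 partway through the 1-second phase.
 */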
  4816. if (i++ >= 200) {
  4817. /* Adapter failed to init, timeout, status reg
  4818. <status> */
  4819. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  4820. "0436 Adapter failed to init, "
  4821. "timeout, status reg x%x, "
  4822. "FW Data: A8 x%x AC x%x\n", status,
  4823. readl(phba->MBslimaddr + 0xa8),
  4824. readl(phba->MBslimaddr + 0xac));
  4825. phba->link_state = LPFC_HBA_ERROR;
  4826. return -ETIMEDOUT;
  4827. }
  4828. /* Check to see if any errors occurred during init */
  4829. if (status & HS_FFERM) {
  4830. /* ERROR: During chipset initialization */
  4831. /* Adapter failed to init, chipset, status reg
  4832. <status> */
  4833. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  4834. "0437 Adapter failed to init, "
  4835. "chipset, status reg x%x, "
  4836. "FW Data: A8 x%x AC x%x\n", status,
  4837. readl(phba->MBslimaddr + 0xa8),
  4838. readl(phba->MBslimaddr + 0xac));
  4839. phba->link_state = LPFC_HBA_ERROR;
  4840. return -EIO;
  4841. }
  4842. if (i <= 10)
  4843. msleep(10);
  4844. else if (i <= 100)
  4845. msleep(100);
  4846. else
  4847. msleep(1000);
  4848. if (i == 150) {
  4849. /* Do post */
  4850. phba->pport->port_state = LPFC_VPORT_UNKNOWN;
  4851. lpfc_sli_brdrestart(phba);
  4852. }
  4853. /* Read the HBA Host Status Register */
  4854. if (lpfc_readl(phba->HSregaddr, &status))
  4855. return -EIO;
  4856. }
  4857. /* Check to see if any errors occurred during init */
  4858. if (status & HS_FFERM) {
  4859. /* ERROR: During chipset initialization */
  4860. /* Adapter failed to init, chipset, status reg <status> */
  4861. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  4862. "0438 Adapter failed to init, chipset, "
  4863. "status reg x%x, "
  4864. "FW Data: A8 x%x AC x%x\n", status,
  4865. readl(phba->MBslimaddr + 0xa8),
  4866. readl(phba->MBslimaddr + 0xac));
  4867. phba->link_state = LPFC_HBA_ERROR;
  4868. return -EIO;
  4869. }
  4870. set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
  4871. /* Clear all interrupt enable conditions */
  4872. writel(0, phba->HCregaddr);
  4873. readl(phba->HCregaddr); /* flush */
  4874. /* setup host attn register */
  4875. writel(0xffffffff, phba->HAregaddr);
  4876. readl(phba->HAregaddr); /* flush */
  4877. return 0;
  4878. }
  4879. /**
  4880. * lpfc_sli_hbq_count - Get the number of HBQs to be configured
  4881. *
  4882. * This function calculates and returns the number of HBQs required to be
  4883. * configured.
  4884. **/
  4885. int
  4886. lpfc_sli_hbq_count(void)
  4887. {
  4888. return ARRAY_SIZE(lpfc_hbq_defs);
  4889. }
  4890. /**
  4891. * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
  4892. *
  4893. * This function adds the number of hbq entries in every HBQ to get
  4894. * the total number of hbq entries required for the HBA and returns
  4895. * the total count.
  4896. **/
  4897. static int
  4898. lpfc_sli_hbq_entry_count(void)
  4899. {
  4900. int hbq_count = lpfc_sli_hbq_count();
  4901. int count = 0;
  4902. int i;
  4903. for (i = 0; i < hbq_count; ++i)
  4904. count += lpfc_hbq_defs[i]->entry_count;
  4905. return count;
  4906. }
  4907. /**
  4908. * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
  4909. *
  4910. * This function calculates amount of memory required for all hbq entries
  4911. * to be configured and returns the total memory required.
  4912. **/
  4913. int
  4914. lpfc_sli_hbq_size(void)
  4915. {
  4916. return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
  4917. }
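/*
 * Illustrative sketch (not part of this file): a caller would typically
 * use lpfc_sli_hbq_size() to size the single DMA region backing all HBQ
 * entries, for example during HBA memory setup:
 *
 *	hbq_virt = dma_alloc_coherent(&phba->pcidev->dev,
 *				      lpfc_sli_hbq_size(),
 *				      &hbq_phys, GFP_KERNEL);
 *
 * The hbq_virt/hbq_phys names are hypothetical; only the sizing call is
 * taken from this file.
 */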
  4918. /**
  4919. * lpfc_sli_hbq_setup - configure and initialize HBQs
  4920. * @phba: Pointer to HBA context object.
  4921. *
  4922. * This function is called during the SLI initialization to configure
  4923. * all the HBQs and post buffers to the HBQ. The caller is not
  4924. * required to hold any locks. This function will return zero if successful
  4925. * else it will return negative error code.
  4926. **/
  4927. static int
  4928. lpfc_sli_hbq_setup(struct lpfc_hba *phba)
  4929. {
  4930. int hbq_count = lpfc_sli_hbq_count();
  4931. LPFC_MBOXQ_t *pmb;
  4932. MAILBOX_t *pmbox;
  4933. uint32_t hbqno;
  4934. uint32_t hbq_entry_index;
  4935. /* Get a Mailbox buffer to setup mailbox
  4936. * commands for HBA initialization
  4937. */
  4938. pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  4939. if (!pmb)
  4940. return -ENOMEM;
  4941. pmbox = &pmb->u.mb;
  4942. /* Initialize the struct lpfc_sli_hbq structure for each hbq */
  4943. phba->link_state = LPFC_INIT_MBX_CMDS;
  4944. phba->hbq_in_use = 1;
  4945. hbq_entry_index = 0;
  4946. for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
  4947. phba->hbqs[hbqno].next_hbqPutIdx = 0;
  4948. phba->hbqs[hbqno].hbqPutIdx = 0;
  4949. phba->hbqs[hbqno].local_hbqGetIdx = 0;
  4950. phba->hbqs[hbqno].entry_count =
  4951. lpfc_hbq_defs[hbqno]->entry_count;
  4952. lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
  4953. hbq_entry_index, pmb);
  4954. hbq_entry_index += phba->hbqs[hbqno].entry_count;
  4955. if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
  4956. /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
  4957. mbxStatus <status>, ring <num> */
  4958. lpfc_printf_log(phba, KERN_ERR,
  4959. LOG_SLI | LOG_VPORT,
  4960. "1805 Adapter failed to init. "
  4961. "Data: x%x x%x x%x\n",
  4962. pmbox->mbxCommand,
  4963. pmbox->mbxStatus, hbqno);
  4964. phba->link_state = LPFC_HBA_ERROR;
  4965. mempool_free(pmb, phba->mbox_mem_pool);
  4966. return -ENXIO;
  4967. }
  4968. }
  4969. phba->hbq_count = hbq_count;
  4970. mempool_free(pmb, phba->mbox_mem_pool);
  4971. /* Initially populate or replenish the HBQs */
  4972. for (hbqno = 0; hbqno < hbq_count; ++hbqno)
  4973. lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
  4974. return 0;
  4975. }
  4976. /**
  4977. * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
  4978. * @phba: Pointer to HBA context object.
  4979. *
4980. * This function is called during SLI4 initialization to configure the
4981. * ELS receive buffer ring and post buffers to it. The caller is not
4982. * required to hold any locks. This function will return zero if successful
4983. * else it will return a negative error code.
  4984. **/
  4985. static int
  4986. lpfc_sli4_rb_setup(struct lpfc_hba *phba)
  4987. {
  4988. phba->hbq_in_use = 1;
4989. /*
4990. * Special case when MDS diagnostics are enabled and supported.
4991. * The receive buffer count is halved to manage the incoming
4992. * traffic.
4993. */
  4994. if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
  4995. phba->hbqs[LPFC_ELS_HBQ].entry_count =
  4996. lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
  4997. else
  4998. phba->hbqs[LPFC_ELS_HBQ].entry_count =
  4999. lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
  5000. phba->hbq_count = 1;
  5001. lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
  5002. /* Initially populate or replenish the HBQs */
  5003. return 0;
  5004. }
  5005. /**
  5006. * lpfc_sli_config_port - Issue config port mailbox command
  5007. * @phba: Pointer to HBA context object.
  5008. * @sli_mode: sli mode - 2/3
  5009. *
  5010. * This function is called by the sli initialization code path
  5011. * to issue config_port mailbox command. This function restarts the
  5012. * HBA firmware and issues a config_port mailbox command to configure
  5013. * the SLI interface in the sli mode specified by sli_mode
  5014. * variable. The caller is not required to hold any locks.
  5015. * The function returns 0 if successful, else returns negative error
  5016. * code.
  5017. **/
  5018. int
  5019. lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
  5020. {
  5021. LPFC_MBOXQ_t *pmb;
  5022. uint32_t resetcount = 0, rc = 0, done = 0;
  5023. pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  5024. if (!pmb) {
  5025. phba->link_state = LPFC_HBA_ERROR;
  5026. return -ENOMEM;
  5027. }
  5028. phba->sli_rev = sli_mode;
  5029. while (resetcount < 2 && !done) {
  5030. spin_lock_irq(&phba->hbalock);
  5031. phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
  5032. spin_unlock_irq(&phba->hbalock);
  5033. phba->pport->port_state = LPFC_VPORT_UNKNOWN;
  5034. lpfc_sli_brdrestart(phba);
  5035. rc = lpfc_sli_chipset_init(phba);
  5036. if (rc)
  5037. break;
  5038. spin_lock_irq(&phba->hbalock);
  5039. phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  5040. spin_unlock_irq(&phba->hbalock);
  5041. resetcount++;
  5042. /* Call pre CONFIG_PORT mailbox command initialization. A
  5043. * value of 0 means the call was successful. Any other
  5044. * nonzero value is a failure, but if ERESTART is returned,
  5045. * the driver may reset the HBA and try again.
  5046. */
  5047. rc = lpfc_config_port_prep(phba);
  5048. if (rc == -ERESTART) {
  5049. phba->link_state = LPFC_LINK_UNKNOWN;
  5050. continue;
  5051. } else if (rc)
  5052. break;
  5053. phba->link_state = LPFC_INIT_MBX_CMDS;
  5054. lpfc_config_port(phba, pmb);
  5055. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  5056. phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
  5057. LPFC_SLI3_HBQ_ENABLED |
  5058. LPFC_SLI3_CRP_ENABLED |
  5059. LPFC_SLI3_DSS_ENABLED);
  5060. if (rc != MBX_SUCCESS) {
  5061. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  5062. "0442 Adapter failed to init, mbxCmd x%x "
  5063. "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
  5064. pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
  5065. spin_lock_irq(&phba->hbalock);
  5066. phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
  5067. spin_unlock_irq(&phba->hbalock);
  5068. rc = -ENXIO;
  5069. } else {
  5070. /* Allow asynchronous mailbox command to go through */
  5071. spin_lock_irq(&phba->hbalock);
  5072. phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
  5073. spin_unlock_irq(&phba->hbalock);
  5074. done = 1;
  5075. if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
  5076. (pmb->u.mb.un.varCfgPort.gasabt == 0))
  5077. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  5078. "3110 Port did not grant ASABT\n");
  5079. }
  5080. }
  5081. if (!done) {
  5082. rc = -EINVAL;
  5083. goto do_prep_failed;
  5084. }
  5085. if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
  5086. if (!pmb->u.mb.un.varCfgPort.cMA) {
  5087. rc = -ENXIO;
  5088. goto do_prep_failed;
  5089. }
  5090. if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
  5091. phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
  5092. phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
  5093. phba->max_vports = (phba->max_vpi > phba->max_vports) ?
  5094. phba->max_vpi : phba->max_vports;
  5095. } else
  5096. phba->max_vpi = 0;
  5097. if (pmb->u.mb.un.varCfgPort.gerbm)
  5098. phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
  5099. if (pmb->u.mb.un.varCfgPort.gcrp)
  5100. phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
  5101. phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
  5102. phba->port_gp = phba->mbox->us.s3_pgp.port;
  5103. if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
  5104. if (pmb->u.mb.un.varCfgPort.gbg == 0) {
  5105. phba->cfg_enable_bg = 0;
  5106. phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
  5107. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  5108. "0443 Adapter did not grant "
  5109. "BlockGuard\n");
  5110. }
  5111. }
  5112. } else {
  5113. phba->hbq_get = NULL;
  5114. phba->port_gp = phba->mbox->us.s2.port;
  5115. phba->max_vpi = 0;
  5116. }
  5117. do_prep_failed:
  5118. mempool_free(pmb, phba->mbox_mem_pool);
  5119. return rc;
  5120. }
  5121. /**
  5122. * lpfc_sli_hba_setup - SLI initialization function
  5123. * @phba: Pointer to HBA context object.
  5124. *
  5125. * This function is the main SLI initialization function. This function
  5126. * is called by the HBA initialization code, HBA reset code and HBA
  5127. * error attention handler code. Caller is not required to hold any
  5128. * locks. This function issues config_port mailbox command to configure
  5129. * the SLI, setup iocb rings and HBQ rings. In the end the function
  5130. * calls the config_port_post function to issue init_link mailbox
  5131. * command and to start the discovery. The function will return zero
  5132. * if successful, else it will return negative error code.
  5133. **/
  5134. int
  5135. lpfc_sli_hba_setup(struct lpfc_hba *phba)
  5136. {
  5137. uint32_t rc;
  5138. int i;
  5139. int longs;
  5140. /* Enable ISR already does config_port because of config_msi mbx */
  5141. if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) {
  5142. rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
  5143. if (rc)
  5144. return -EIO;
  5145. clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
  5146. }
  5147. phba->fcp_embed_io = 0; /* SLI4 FC support only */
  5148. if (phba->sli_rev == 3) {
  5149. phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
  5150. phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
  5151. } else {
  5152. phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
  5153. phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
  5154. phba->sli3_options = 0;
  5155. }
  5156. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  5157. "0444 Firmware in SLI %x mode. Max_vpi %d\n",
  5158. phba->sli_rev, phba->max_vpi);
  5159. rc = lpfc_sli_ring_map(phba);
  5160. if (rc)
  5161. goto lpfc_sli_hba_setup_error;
  5162. /* Initialize VPIs. */
  5163. if (phba->sli_rev == LPFC_SLI_REV3) {
  5164. /*
  5165. * The VPI bitmask and physical ID array are allocated
  5166. * and initialized once only - at driver load. A port
  5167. * reset doesn't need to reinitialize this memory.
  5168. */
  5169. if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
  5170. longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
  5171. phba->vpi_bmask = kcalloc(longs,
  5172. sizeof(unsigned long),
  5173. GFP_KERNEL);
  5174. if (!phba->vpi_bmask) {
  5175. rc = -ENOMEM;
  5176. goto lpfc_sli_hba_setup_error;
  5177. }
  5178. phba->vpi_ids = kcalloc(phba->max_vpi + 1,
  5179. sizeof(uint16_t),
  5180. GFP_KERNEL);
  5181. if (!phba->vpi_ids) {
  5182. kfree(phba->vpi_bmask);
  5183. rc = -ENOMEM;
  5184. goto lpfc_sli_hba_setup_error;
  5185. }
  5186. for (i = 0; i < phba->max_vpi; i++)
  5187. phba->vpi_ids[i] = i;
  5188. }
  5189. }
  5190. /* Init HBQs */
  5191. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  5192. rc = lpfc_sli_hbq_setup(phba);
  5193. if (rc)
  5194. goto lpfc_sli_hba_setup_error;
  5195. }
  5196. spin_lock_irq(&phba->hbalock);
  5197. phba->sli.sli_flag |= LPFC_PROCESS_LA;
  5198. spin_unlock_irq(&phba->hbalock);
  5199. rc = lpfc_config_port_post(phba);
  5200. if (rc)
  5201. goto lpfc_sli_hba_setup_error;
  5202. return rc;
  5203. lpfc_sli_hba_setup_error:
  5204. phba->link_state = LPFC_HBA_ERROR;
  5205. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  5206. "0445 Firmware initialization failed\n");
  5207. return rc;
  5208. }
  5209. /**
  5210. * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
  5211. * @phba: Pointer to HBA context object.
  5212. *
5213. * This function issues a dump mailbox command to read config region
5214. * 23, parses the records in the region, and populates the driver
5215. * data structures.
  5216. **/
  5217. static int
  5218. lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
  5219. {
  5220. LPFC_MBOXQ_t *mboxq;
  5221. struct lpfc_dmabuf *mp;
  5222. struct lpfc_mqe *mqe;
  5223. uint32_t data_length;
  5224. int rc;
  5225. /* Program the default value of vlan_id and fc_map */
  5226. phba->valid_vlan = 0;
  5227. phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
  5228. phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
  5229. phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
  5230. mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  5231. if (!mboxq)
  5232. return -ENOMEM;
  5233. mqe = &mboxq->u.mqe;
  5234. if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
  5235. rc = -ENOMEM;
  5236. goto out_free_mboxq;
  5237. }
  5238. mp = mboxq->ctx_buf;
  5239. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  5240. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  5241. "(%d):2571 Mailbox cmd x%x Status x%x "
  5242. "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
  5243. "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
  5244. "CQ: x%x x%x x%x x%x\n",
  5245. mboxq->vport ? mboxq->vport->vpi : 0,
  5246. bf_get(lpfc_mqe_command, mqe),
  5247. bf_get(lpfc_mqe_status, mqe),
  5248. mqe->un.mb_words[0], mqe->un.mb_words[1],
  5249. mqe->un.mb_words[2], mqe->un.mb_words[3],
  5250. mqe->un.mb_words[4], mqe->un.mb_words[5],
  5251. mqe->un.mb_words[6], mqe->un.mb_words[7],
  5252. mqe->un.mb_words[8], mqe->un.mb_words[9],
  5253. mqe->un.mb_words[10], mqe->un.mb_words[11],
  5254. mqe->un.mb_words[12], mqe->un.mb_words[13],
  5255. mqe->un.mb_words[14], mqe->un.mb_words[15],
  5256. mqe->un.mb_words[16], mqe->un.mb_words[50],
  5257. mboxq->mcqe.word0,
  5258. mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
  5259. mboxq->mcqe.trailer);
  5260. if (rc) {
  5261. rc = -EIO;
  5262. goto out_free_mboxq;
  5263. }
  5264. data_length = mqe->un.mb_words[5];
  5265. if (data_length > DMP_RGN23_SIZE) {
  5266. rc = -EIO;
  5267. goto out_free_mboxq;
  5268. }
  5269. lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
  5270. rc = 0;
  5271. out_free_mboxq:
  5272. lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
  5273. return rc;
  5274. }
  5275. /**
  5276. * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
  5277. * @phba: pointer to lpfc hba data structure.
  5278. * @mboxq: pointer to the LPFC_MBOXQ_t structure.
  5279. * @vpd: pointer to the memory to hold resulting port vpd data.
  5280. * @vpd_size: On input, the number of bytes allocated to @vpd.
  5281. * On output, the number of data bytes in @vpd.
  5282. *
  5283. * This routine executes a READ_REV SLI4 mailbox command. In
  5284. * addition, this routine gets the port vpd data.
  5285. *
  5286. * Return codes
  5287. * 0 - successful
5288. * -ENOMEM - could not allocate memory.
  5289. **/
  5290. static int
  5291. lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
  5292. uint8_t *vpd, uint32_t *vpd_size)
  5293. {
  5294. int rc = 0;
  5295. uint32_t dma_size;
  5296. struct lpfc_dmabuf *dmabuf;
  5297. struct lpfc_mqe *mqe;
  5298. dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  5299. if (!dmabuf)
  5300. return -ENOMEM;
  5301. /*
  5302. * Get a DMA buffer for the vpd data resulting from the READ_REV
  5303. * mailbox command.
  5304. */
  5305. dma_size = *vpd_size;
  5306. dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
  5307. &dmabuf->phys, GFP_KERNEL);
  5308. if (!dmabuf->virt) {
  5309. kfree(dmabuf);
  5310. return -ENOMEM;
  5311. }
  5312. /*
  5313. * The SLI4 implementation of READ_REV conflicts at word1,
  5314. * bits 31:16 and SLI4 adds vpd functionality not present
  5315. * in SLI3. This code corrects the conflicts.
  5316. */
  5317. lpfc_read_rev(phba, mboxq);
  5318. mqe = &mboxq->u.mqe;
  5319. mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
  5320. mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
  5321. mqe->un.read_rev.word1 &= 0x0000FFFF;
  5322. bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
  5323. bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
  5324. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  5325. if (rc) {
  5326. dma_free_coherent(&phba->pcidev->dev, dma_size,
  5327. dmabuf->virt, dmabuf->phys);
  5328. kfree(dmabuf);
  5329. return -EIO;
  5330. }
  5331. /*
  5332. * The available vpd length cannot be bigger than the
  5333. * DMA buffer passed to the port. Catch the less than
  5334. * case and update the caller's size.
  5335. */
  5336. if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
  5337. *vpd_size = mqe->un.read_rev.avail_vpd_len;
  5338. memcpy(vpd, dmabuf->virt, *vpd_size);
  5339. dma_free_coherent(&phba->pcidev->dev, dma_size,
  5340. dmabuf->virt, dmabuf->phys);
  5341. kfree(dmabuf);
  5342. return 0;
  5343. }
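/*
 * Illustrative usage sketch (assumed caller context, not part of this
 * file): the SLI4 setup path would pass a caller-allocated VPD buffer and
 * let this routine shrink vpd_size to the number of bytes returned:
 *
 *	uint32_t vpd_size = 1024;	(assumed initial buffer size)
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd)
 *		rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
 *
 * The 1024-byte size is an arbitrary placeholder; the real caller chooses
 * its own buffer size.
 */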
  5344. /**
  5345. * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
  5346. * @phba: pointer to lpfc hba data structure.
  5347. *
5348. * This routine retrieves the SLI4 device controller attributes (link type
5349. * and number, BIOS version, flash id, ASIC revision) for this PCI function.
  5350. *
  5351. * Return codes
  5352. * 0 - successful
  5353. * otherwise - failed to retrieve controller attributes
  5354. **/
  5355. static int
  5356. lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
  5357. {
  5358. LPFC_MBOXQ_t *mboxq;
  5359. struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
  5360. struct lpfc_controller_attribute *cntl_attr;
  5361. void *virtaddr = NULL;
  5362. uint32_t alloclen, reqlen;
  5363. uint32_t shdr_status, shdr_add_status;
  5364. union lpfc_sli4_cfg_shdr *shdr;
  5365. int rc;
  5366. mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  5367. if (!mboxq)
  5368. return -ENOMEM;
  5369. /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
  5370. reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
  5371. alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
  5372. LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
  5373. LPFC_SLI4_MBX_NEMBED);
  5374. if (alloclen < reqlen) {
  5375. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  5376. "3084 Allocated DMA memory size (%d) is "
  5377. "less than the requested DMA memory size "
  5378. "(%d)\n", alloclen, reqlen);
  5379. rc = -ENOMEM;
  5380. goto out_free_mboxq;
  5381. }
  5382. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  5383. virtaddr = mboxq->sge_array->addr[0];
  5384. mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
  5385. shdr = &mbx_cntl_attr->cfg_shdr;
  5386. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  5387. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  5388. if (shdr_status || shdr_add_status || rc) {
  5389. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  5390. "3085 Mailbox x%x (x%x/x%x) failed, "
  5391. "rc:x%x, status:x%x, add_status:x%x\n",
  5392. bf_get(lpfc_mqe_command, &mboxq->u.mqe),
  5393. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  5394. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  5395. rc, shdr_status, shdr_add_status);
  5396. rc = -ENXIO;
  5397. goto out_free_mboxq;
  5398. }
  5399. cntl_attr = &mbx_cntl_attr->cntl_attr;
  5400. phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
  5401. phba->sli4_hba.lnk_info.lnk_tp =
  5402. bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
  5403. phba->sli4_hba.lnk_info.lnk_no =
  5404. bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
  5405. phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
  5406. phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
  5407. memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
  5408. strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
  5409. sizeof(phba->BIOSVersion));
  5410. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  5411. "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
  5412. "flash_id: x%02x, asic_rev: x%02x\n",
  5413. phba->sli4_hba.lnk_info.lnk_tp,
  5414. phba->sli4_hba.lnk_info.lnk_no,
  5415. phba->BIOSVersion, phba->sli4_hba.flash_id,
  5416. phba->sli4_hba.asic_rev);
  5417. out_free_mboxq:
  5418. if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
  5419. lpfc_sli4_mbox_cmd_free(phba, mboxq);
  5420. else
  5421. mempool_free(mboxq, phba->mbox_mem_pool);
  5422. return rc;
  5423. }
  5424. /**
  5425. * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
  5426. * @phba: pointer to lpfc hba data structure.
  5427. *
5428. * This routine retrieves the physical port name of the SLI4 device port
5429. * this PCI function is attached to.
  5430. *
  5431. * Return codes
  5432. * 0 - successful
  5433. * otherwise - failed to retrieve physical port name
  5434. **/
  5435. static int
  5436. lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
  5437. {
  5438. LPFC_MBOXQ_t *mboxq;
  5439. struct lpfc_mbx_get_port_name *get_port_name;
  5440. uint32_t shdr_status, shdr_add_status;
  5441. union lpfc_sli4_cfg_shdr *shdr;
  5442. char cport_name = 0;
  5443. int rc;
  5444. /* We assume nothing at this point */
  5445. phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
  5446. phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
  5447. mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  5448. if (!mboxq)
  5449. return -ENOMEM;
  5450. /* obtain link type and link number via READ_CONFIG */
  5451. phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
  5452. lpfc_sli4_read_config(phba);
  5453. if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
  5454. phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
  5455. if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
  5456. goto retrieve_ppname;
  5457. /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
  5458. rc = lpfc_sli4_get_ctl_attr(phba);
  5459. if (rc)
  5460. goto out_free_mboxq;
  5461. retrieve_ppname:
  5462. lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
  5463. LPFC_MBOX_OPCODE_GET_PORT_NAME,
  5464. sizeof(struct lpfc_mbx_get_port_name) -
  5465. sizeof(struct lpfc_sli4_cfg_mhdr),
  5466. LPFC_SLI4_MBX_EMBED);
  5467. get_port_name = &mboxq->u.mqe.un.get_port_name;
  5468. shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
  5469. bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
  5470. bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
  5471. phba->sli4_hba.lnk_info.lnk_tp);
  5472. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  5473. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  5474. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  5475. if (shdr_status || shdr_add_status || rc) {
  5476. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  5477. "3087 Mailbox x%x (x%x/x%x) failed: "
  5478. "rc:x%x, status:x%x, add_status:x%x\n",
  5479. bf_get(lpfc_mqe_command, &mboxq->u.mqe),
  5480. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  5481. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  5482. rc, shdr_status, shdr_add_status);
  5483. rc = -ENXIO;
  5484. goto out_free_mboxq;
  5485. }
  5486. switch (phba->sli4_hba.lnk_info.lnk_no) {
  5487. case LPFC_LINK_NUMBER_0:
  5488. cport_name = bf_get(lpfc_mbx_get_port_name_name0,
  5489. &get_port_name->u.response);
  5490. phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
  5491. break;
  5492. case LPFC_LINK_NUMBER_1:
  5493. cport_name = bf_get(lpfc_mbx_get_port_name_name1,
  5494. &get_port_name->u.response);
  5495. phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
  5496. break;
  5497. case LPFC_LINK_NUMBER_2:
  5498. cport_name = bf_get(lpfc_mbx_get_port_name_name2,
  5499. &get_port_name->u.response);
  5500. phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
  5501. break;
  5502. case LPFC_LINK_NUMBER_3:
  5503. cport_name = bf_get(lpfc_mbx_get_port_name_name3,
  5504. &get_port_name->u.response);
  5505. phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
  5506. break;
  5507. default:
  5508. break;
  5509. }
  5510. if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
  5511. phba->Port[0] = cport_name;
  5512. phba->Port[1] = '\0';
  5513. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  5514. "3091 SLI get port name: %s\n", phba->Port);
  5515. }
  5516. out_free_mboxq:
  5517. if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
  5518. lpfc_sli4_mbox_cmd_free(phba, mboxq);
  5519. else
  5520. mempool_free(mboxq, phba->mbox_mem_pool);
  5521. return rc;
  5522. }
  5523. /**
  5524. * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
  5525. * @phba: pointer to lpfc hba data structure.
  5526. *
  5527. * This routine is called to explicitly arm the SLI4 device's completion and
5528. * event queues.
  5529. **/
  5530. static void
  5531. lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
  5532. {
  5533. int qidx;
  5534. struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
  5535. struct lpfc_sli4_hdw_queue *qp;
  5536. struct lpfc_queue *eq;
  5537. sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
  5538. sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
  5539. if (sli4_hba->nvmels_cq)
  5540. sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
  5541. LPFC_QUEUE_REARM);
  5542. if (sli4_hba->hdwq) {
  5543. /* Loop thru all Hardware Queues */
  5544. for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
  5545. qp = &sli4_hba->hdwq[qidx];
  5546. /* ARM the corresponding CQ */
  5547. sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
  5548. LPFC_QUEUE_REARM);
  5549. }
  5550. /* Loop thru all IRQ vectors */
  5551. for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
  5552. eq = sli4_hba->hba_eq_hdl[qidx].eq;
  5553. /* ARM the corresponding EQ */
  5554. sli4_hba->sli4_write_eq_db(phba, eq,
  5555. 0, LPFC_QUEUE_REARM);
  5556. }
  5557. }
  5558. if (phba->nvmet_support) {
  5559. for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
  5560. sli4_hba->sli4_write_cq_db(phba,
  5561. sli4_hba->nvmet_cqset[qidx], 0,
  5562. LPFC_QUEUE_REARM);
  5563. }
  5564. }
  5565. }
  5566. /**
  5567. * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
  5568. * @phba: Pointer to HBA context object.
  5569. * @type: The resource extent type.
  5570. * @extnt_count: buffer to hold port available extent count.
  5571. * @extnt_size: buffer to hold element count per extent.
  5572. *
5573. * This function calls the port and retrieves the number of available
  5574. * extents and their size for a particular extent type.
  5575. *
  5576. * Returns: 0 if successful. Nonzero otherwise.
  5577. **/
  5578. int
  5579. lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
  5580. uint16_t *extnt_count, uint16_t *extnt_size)
  5581. {
  5582. int rc = 0;
  5583. uint32_t length;
  5584. uint32_t mbox_tmo;
  5585. struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
  5586. LPFC_MBOXQ_t *mbox;
  5587. *extnt_count = 0;
  5588. *extnt_size = 0;
  5589. mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  5590. if (!mbox)
  5591. return -ENOMEM;
  5592. /* Find out how many extents are available for this resource type */
  5593. length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
  5594. sizeof(struct lpfc_sli4_cfg_mhdr));
  5595. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  5596. LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
  5597. length, LPFC_SLI4_MBX_EMBED);
  5598. /* Send an extents count of 0 - the GET doesn't use it. */
  5599. rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
  5600. LPFC_SLI4_MBX_EMBED);
  5601. if (unlikely(rc)) {
  5602. rc = -EIO;
  5603. goto err_exit;
  5604. }
  5605. if (!phba->sli4_hba.intr_enable)
  5606. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  5607. else {
  5608. mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  5609. rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  5610. }
  5611. if (unlikely(rc)) {
  5612. rc = -EIO;
  5613. goto err_exit;
  5614. }
  5615. rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
  5616. if (bf_get(lpfc_mbox_hdr_status,
  5617. &rsrc_info->header.cfg_shdr.response)) {
  5618. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  5619. "2930 Failed to get resource extents "
  5620. "Status 0x%x Add'l Status 0x%x\n",
  5621. bf_get(lpfc_mbox_hdr_status,
  5622. &rsrc_info->header.cfg_shdr.response),
  5623. bf_get(lpfc_mbox_hdr_add_status,
  5624. &rsrc_info->header.cfg_shdr.response));
  5625. rc = -EIO;
  5626. goto err_exit;
  5627. }
  5628. *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
  5629. &rsrc_info->u.rsp);
  5630. *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
  5631. &rsrc_info->u.rsp);
  5632. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  5633. "3162 Retrieved extents type-%d from port: count:%d, "
  5634. "size:%d\n", type, *extnt_count, *extnt_size);
  5635. err_exit:
  5636. mempool_free(mbox, phba->mbox_mem_pool);
  5637. return rc;
  5638. }
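/*
 * Illustrative usage sketch (not part of this file): querying how many
 * XRI extents the port can provide and how many elements each carries:
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size))
 *		total_xri = (uint32_t)ext_cnt * ext_size;
 *
 * total_xri is a hypothetical local used only for illustration.
 */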
  5639. /**
  5640. * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
  5641. * @phba: Pointer to HBA context object.
  5642. * @type: The extent type to check.
  5643. *
  5644. * This function reads the current available extents from the port and checks
  5645. * if the extent count or extent size has changed since the last access.
5646. * Callers use this routine post port reset to understand if there is an
5647. * extent reprovisioning requirement.
  5648. *
  5649. * Returns:
5650. * -Error: a negative error code indicates a problem.
  5651. * 1: Extent count or size has changed.
  5652. * 0: No changes.
  5653. **/
  5654. static int
  5655. lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
  5656. {
  5657. uint16_t curr_ext_cnt, rsrc_ext_cnt;
  5658. uint16_t size_diff, rsrc_ext_size;
  5659. int rc = 0;
  5660. struct lpfc_rsrc_blks *rsrc_entry;
  5661. struct list_head *rsrc_blk_list = NULL;
  5662. size_diff = 0;
  5663. curr_ext_cnt = 0;
  5664. rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
  5665. &rsrc_ext_cnt,
  5666. &rsrc_ext_size);
  5667. if (unlikely(rc))
  5668. return -EIO;
  5669. switch (type) {
  5670. case LPFC_RSC_TYPE_FCOE_RPI:
  5671. rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
  5672. break;
  5673. case LPFC_RSC_TYPE_FCOE_VPI:
  5674. rsrc_blk_list = &phba->lpfc_vpi_blk_list;
  5675. break;
  5676. case LPFC_RSC_TYPE_FCOE_XRI:
  5677. rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
  5678. break;
  5679. case LPFC_RSC_TYPE_FCOE_VFI:
  5680. rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
  5681. break;
  5682. default:
  5683. break;
  5684. }
  5685. list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
  5686. curr_ext_cnt++;
  5687. if (rsrc_entry->rsrc_size != rsrc_ext_size)
  5688. size_diff++;
  5689. }
  5690. if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
  5691. rc = 1;
  5692. return rc;
  5693. }
  5694. /**
5695. * lpfc_sli4_cfg_post_extnts - Post a request to allocate resource extents
  5696. * @phba: Pointer to HBA context object.
  5697. * @extnt_cnt: number of available extents.
  5698. * @type: the extent type (rpi, xri, vfi, vpi).
  5699. * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
  5700. * @mbox: pointer to the caller's allocated mailbox structure.
  5701. *
5702. * This function issues the extent allocation request and sizes the
5703. * mailbox (embedded or non-embedded) according to the memory needed
5704. * to hold the allocated extents. It is the caller's responsibility to
5705. * evaluate the response.
  5706. *
  5707. * Returns:
  5708. * -Error: Error value describes the condition found.
  5709. * 0: if successful
  5710. **/
  5711. static int
  5712. lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
  5713. uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
  5714. {
  5715. int rc = 0;
  5716. uint32_t req_len;
  5717. uint32_t emb_len;
  5718. uint32_t alloc_len, mbox_tmo;
  5719. /* Calculate the total requested length of the dma memory */
  5720. req_len = extnt_cnt * sizeof(uint16_t);
  5721. /*
  5722. * Calculate the size of an embedded mailbox. The uint32_t
  5723. * accounts for extents-specific word.
  5724. */
  5725. emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
  5726. sizeof(uint32_t);
  5727. /*
  5728. * Presume the allocation and response will fit into an embedded
  5729. * mailbox. If not true, reconfigure to a non-embedded mailbox.
  5730. */
  5731. *emb = LPFC_SLI4_MBX_EMBED;
  5732. if (req_len > emb_len) {
  5733. req_len = extnt_cnt * sizeof(uint16_t) +
  5734. sizeof(union lpfc_sli4_cfg_shdr) +
  5735. sizeof(uint32_t);
  5736. *emb = LPFC_SLI4_MBX_NEMBED;
  5737. }
  5738. alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  5739. LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
  5740. req_len, *emb);
  5741. if (alloc_len < req_len) {
  5742. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  5743. "2982 Allocated DMA memory size (x%x) is "
  5744. "less than the requested DMA memory "
  5745. "size (x%x)\n", alloc_len, req_len);
  5746. return -ENOMEM;
  5747. }
  5748. rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
  5749. if (unlikely(rc))
  5750. return -EIO;
  5751. if (!phba->sli4_hba.intr_enable)
  5752. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  5753. else {
  5754. mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  5755. rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  5756. }
  5757. if (unlikely(rc))
  5758. rc = -EIO;
  5759. return rc;
  5760. }
  5761. /**
  5762. * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
  5763. * @phba: Pointer to HBA context object.
  5764. * @type: The resource extent type to allocate.
  5765. *
  5766. * This function allocates the number of elements for the specified
  5767. * resource type.
  5768. **/
  5769. static int
  5770. lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
  5771. {
  5772. bool emb = false;
  5773. uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
  5774. uint16_t rsrc_id, rsrc_start, j, k;
  5775. uint16_t *ids;
  5776. int i, rc;
  5777. unsigned long longs;
  5778. unsigned long *bmask;
  5779. struct lpfc_rsrc_blks *rsrc_blks;
  5780. LPFC_MBOXQ_t *mbox;
  5781. uint32_t length;
  5782. struct lpfc_id_range *id_array = NULL;
  5783. void *virtaddr = NULL;
  5784. struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
  5785. struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
  5786. struct list_head *ext_blk_list;
  5787. rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
  5788. &rsrc_cnt,
  5789. &rsrc_size);
  5790. if (unlikely(rc))
  5791. return -EIO;
  5792. if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
  5793. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  5794. "3009 No available Resource Extents "
  5795. "for resource type 0x%x: Count: 0x%x, "
  5796. "Size 0x%x\n", type, rsrc_cnt,
  5797. rsrc_size);
  5798. return -ENOMEM;
  5799. }
  5800. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
  5801. "2903 Post resource extents type-0x%x: "
  5802. "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
  5803. mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  5804. if (!mbox)
  5805. return -ENOMEM;
  5806. rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
  5807. if (unlikely(rc)) {
  5808. rc = -EIO;
  5809. goto err_exit;
  5810. }
  5811. /*
5812. * Figure out where the response is located and get local pointers
5813. * to the response data. The port does not guarantee to satisfy the
5814. * full extent count requested, so update the local count with the
5815. * number actually allocated by the port.
  5816. */
  5817. if (emb == LPFC_SLI4_MBX_EMBED) {
  5818. rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
  5819. id_array = &rsrc_ext->u.rsp.id[0];
  5820. rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
  5821. } else {
  5822. virtaddr = mbox->sge_array->addr[0];
  5823. n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
  5824. rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
  5825. id_array = &n_rsrc->id;
  5826. }
  5827. longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
  5828. rsrc_id_cnt = rsrc_cnt * rsrc_size;
  5829. /*
  5830. * Based on the resource size and count, correct the base and max
  5831. * resource values.
  5832. */
  5833. length = sizeof(struct lpfc_rsrc_blks);
  5834. switch (type) {
  5835. case LPFC_RSC_TYPE_FCOE_RPI:
  5836. phba->sli4_hba.rpi_bmask = kcalloc(longs,
  5837. sizeof(unsigned long),
  5838. GFP_KERNEL);
  5839. if (unlikely(!phba->sli4_hba.rpi_bmask)) {
  5840. rc = -ENOMEM;
  5841. goto err_exit;
  5842. }
  5843. phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
  5844. sizeof(uint16_t),
  5845. GFP_KERNEL);
  5846. if (unlikely(!phba->sli4_hba.rpi_ids)) {
  5847. kfree(phba->sli4_hba.rpi_bmask);
  5848. rc = -ENOMEM;
  5849. goto err_exit;
  5850. }
  5851. /*
  5852. * The next_rpi was initialized with the maximum available
  5853. * count but the port may allocate a smaller number. Catch
  5854. * that case and update the next_rpi.
  5855. */
  5856. phba->sli4_hba.next_rpi = rsrc_id_cnt;
  5857. /* Initialize local ptrs for common extent processing later. */
  5858. bmask = phba->sli4_hba.rpi_bmask;
  5859. ids = phba->sli4_hba.rpi_ids;
  5860. ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
  5861. break;
  5862. case LPFC_RSC_TYPE_FCOE_VPI:
  5863. phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
  5864. GFP_KERNEL);
  5865. if (unlikely(!phba->vpi_bmask)) {
  5866. rc = -ENOMEM;
  5867. goto err_exit;
  5868. }
  5869. phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
  5870. GFP_KERNEL);
  5871. if (unlikely(!phba->vpi_ids)) {
  5872. kfree(phba->vpi_bmask);
  5873. rc = -ENOMEM;
  5874. goto err_exit;
  5875. }
  5876. /* Initialize local ptrs for common extent processing later. */
  5877. bmask = phba->vpi_bmask;
  5878. ids = phba->vpi_ids;
  5879. ext_blk_list = &phba->lpfc_vpi_blk_list;
  5880. break;
  5881. case LPFC_RSC_TYPE_FCOE_XRI:
  5882. phba->sli4_hba.xri_bmask = kcalloc(longs,
  5883. sizeof(unsigned long),
  5884. GFP_KERNEL);
  5885. if (unlikely(!phba->sli4_hba.xri_bmask)) {
  5886. rc = -ENOMEM;
  5887. goto err_exit;
  5888. }
  5889. phba->sli4_hba.max_cfg_param.xri_used = 0;
  5890. phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
  5891. sizeof(uint16_t),
  5892. GFP_KERNEL);
  5893. if (unlikely(!phba->sli4_hba.xri_ids)) {
  5894. kfree(phba->sli4_hba.xri_bmask);
  5895. rc = -ENOMEM;
  5896. goto err_exit;
  5897. }
  5898. /* Initialize local ptrs for common extent processing later. */
  5899. bmask = phba->sli4_hba.xri_bmask;
  5900. ids = phba->sli4_hba.xri_ids;
  5901. ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
  5902. break;
  5903. case LPFC_RSC_TYPE_FCOE_VFI:
  5904. phba->sli4_hba.vfi_bmask = kcalloc(longs,
  5905. sizeof(unsigned long),
  5906. GFP_KERNEL);
  5907. if (unlikely(!phba->sli4_hba.vfi_bmask)) {
  5908. rc = -ENOMEM;
  5909. goto err_exit;
  5910. }
  5911. phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
  5912. sizeof(uint16_t),
  5913. GFP_KERNEL);
  5914. if (unlikely(!phba->sli4_hba.vfi_ids)) {
  5915. kfree(phba->sli4_hba.vfi_bmask);
  5916. rc = -ENOMEM;
  5917. goto err_exit;
  5918. }
  5919. /* Initialize local ptrs for common extent processing later. */
  5920. bmask = phba->sli4_hba.vfi_bmask;
  5921. ids = phba->sli4_hba.vfi_ids;
  5922. ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
  5923. break;
  5924. default:
  5925. /* Unsupported Opcode. Fail call. */
  5926. id_array = NULL;
  5927. bmask = NULL;
  5928. ids = NULL;
  5929. ext_blk_list = NULL;
  5930. goto err_exit;
  5931. }
  5932. /*
  5933. * Complete initializing the extent configuration with the
  5934. * allocated ids assigned to this function. The bitmask serves
  5935. * as an index into the array and manages the available ids. The
  5936. * array just stores the ids communicated to the port via the wqes.
  5937. */
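/*
 * Note on the response layout (derived from the loop below): each 32-bit
 * word of id_array carries two 16-bit extent base ids, read via the
 * word4_0 and word4_1 fields, so the word index k only advances after
 * every second extent processed.
 */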
  5938. for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
  5939. if ((i % 2) == 0)
  5940. rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
  5941. &id_array[k]);
  5942. else
  5943. rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
  5944. &id_array[k]);
  5945. rsrc_blks = kzalloc(length, GFP_KERNEL);
  5946. if (unlikely(!rsrc_blks)) {
  5947. rc = -ENOMEM;
  5948. kfree(bmask);
  5949. kfree(ids);
  5950. goto err_exit;
  5951. }
  5952. rsrc_blks->rsrc_start = rsrc_id;
  5953. rsrc_blks->rsrc_size = rsrc_size;
  5954. list_add_tail(&rsrc_blks->list, ext_blk_list);
  5955. rsrc_start = rsrc_id;
  5956. if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
  5957. phba->sli4_hba.io_xri_start = rsrc_start +
  5958. lpfc_sli4_get_iocb_cnt(phba);
  5959. }
  5960. while (rsrc_id < (rsrc_start + rsrc_size)) {
  5961. ids[j] = rsrc_id;
  5962. rsrc_id++;
  5963. j++;
  5964. }
  5965. /* Entire word processed. Get next word.*/
  5966. if ((i % 2) == 1)
  5967. k++;
  5968. }
  5969. err_exit:
  5970. lpfc_sli4_mbox_cmd_free(phba, mbox);
  5971. return rc;
  5972. }
  5973. /**
  5974. * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
  5975. * @phba: Pointer to HBA context object.
  5976. * @type: the extent's type.
  5977. *
  5978. * This function deallocates all extents of a particular resource type.
  5979. * SLI4 does not allow for deallocating a particular extent range. It
  5980. * is the caller's responsibility to release all kernel memory resources.
  5981. **/
  5982. static int
  5983. lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
  5984. {
  5985. int rc;
  5986. uint32_t length, mbox_tmo = 0;
  5987. LPFC_MBOXQ_t *mbox;
  5988. struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
  5989. struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
  5990. mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  5991. if (!mbox)
  5992. return -ENOMEM;
  5993. /*
5994. * This function sends an embedded mailbox because it only sends
5995. * the resource type. All extents of this type are released by the
  5996. * port.
  5997. */
  5998. length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
  5999. sizeof(struct lpfc_sli4_cfg_mhdr));
  6000. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  6001. LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
  6002. length, LPFC_SLI4_MBX_EMBED);
  6003. /* Send an extents count of 0 - the dealloc doesn't use it. */
  6004. rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
  6005. LPFC_SLI4_MBX_EMBED);
  6006. if (unlikely(rc)) {
  6007. rc = -EIO;
  6008. goto out_free_mbox;
  6009. }
  6010. if (!phba->sli4_hba.intr_enable)
  6011. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  6012. else {
  6013. mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  6014. rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  6015. }
  6016. if (unlikely(rc)) {
  6017. rc = -EIO;
  6018. goto out_free_mbox;
  6019. }
  6020. dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
  6021. if (bf_get(lpfc_mbox_hdr_status,
  6022. &dealloc_rsrc->header.cfg_shdr.response)) {
  6023. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6024. "2919 Failed to release resource extents "
  6025. "for type %d - Status 0x%x Add'l Status 0x%x. "
  6026. "Resource memory not released.\n",
  6027. type,
  6028. bf_get(lpfc_mbox_hdr_status,
  6029. &dealloc_rsrc->header.cfg_shdr.response),
  6030. bf_get(lpfc_mbox_hdr_add_status,
  6031. &dealloc_rsrc->header.cfg_shdr.response));
  6032. rc = -EIO;
  6033. goto out_free_mbox;
  6034. }
  6035. /* Release kernel memory resources for the specific type. */
  6036. switch (type) {
  6037. case LPFC_RSC_TYPE_FCOE_VPI:
  6038. kfree(phba->vpi_bmask);
  6039. kfree(phba->vpi_ids);
  6040. bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
  6041. list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
  6042. &phba->lpfc_vpi_blk_list, list) {
  6043. list_del_init(&rsrc_blk->list);
  6044. kfree(rsrc_blk);
  6045. }
  6046. phba->sli4_hba.max_cfg_param.vpi_used = 0;
  6047. break;
  6048. case LPFC_RSC_TYPE_FCOE_XRI:
  6049. kfree(phba->sli4_hba.xri_bmask);
  6050. kfree(phba->sli4_hba.xri_ids);
  6051. list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
  6052. &phba->sli4_hba.lpfc_xri_blk_list, list) {
  6053. list_del_init(&rsrc_blk->list);
  6054. kfree(rsrc_blk);
  6055. }
  6056. break;
  6057. case LPFC_RSC_TYPE_FCOE_VFI:
  6058. kfree(phba->sli4_hba.vfi_bmask);
  6059. kfree(phba->sli4_hba.vfi_ids);
  6060. bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
  6061. list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
  6062. &phba->sli4_hba.lpfc_vfi_blk_list, list) {
  6063. list_del_init(&rsrc_blk->list);
  6064. kfree(rsrc_blk);
  6065. }
  6066. break;
  6067. case LPFC_RSC_TYPE_FCOE_RPI:
  6068. /* RPI bitmask and physical id array are cleaned up earlier. */
  6069. list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
  6070. &phba->sli4_hba.lpfc_rpi_blk_list, list) {
  6071. list_del_init(&rsrc_blk->list);
  6072. kfree(rsrc_blk);
  6073. }
  6074. break;
  6075. default:
  6076. break;
  6077. }
  6078. bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
  6079. out_free_mbox:
  6080. mempool_free(mbox, phba->mbox_mem_pool);
  6081. return rc;
  6082. }
  6083. static void
  6084. lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
  6085. uint32_t feature)
  6086. {
  6087. uint32_t len;
  6088. u32 sig_freq = 0;
  6089. len = sizeof(struct lpfc_mbx_set_feature) -
  6090. sizeof(struct lpfc_sli4_cfg_mhdr);
  6091. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  6092. LPFC_MBOX_OPCODE_SET_FEATURES, len,
  6093. LPFC_SLI4_MBX_EMBED);
  6094. switch (feature) {
  6095. case LPFC_SET_UE_RECOVERY:
  6096. bf_set(lpfc_mbx_set_feature_UER,
  6097. &mbox->u.mqe.un.set_feature, 1);
  6098. mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
  6099. mbox->u.mqe.un.set_feature.param_len = 8;
  6100. break;
  6101. case LPFC_SET_MDS_DIAGS:
  6102. bf_set(lpfc_mbx_set_feature_mds,
  6103. &mbox->u.mqe.un.set_feature, 1);
  6104. bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
  6105. &mbox->u.mqe.un.set_feature, 1);
  6106. mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
  6107. mbox->u.mqe.un.set_feature.param_len = 8;
  6108. break;
  6109. case LPFC_SET_CGN_SIGNAL:
  6110. if (phba->cmf_active_mode == LPFC_CFG_OFF)
  6111. sig_freq = 0;
  6112. else
  6113. sig_freq = phba->cgn_sig_freq;
  6114. if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
  6115. bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
  6116. &mbox->u.mqe.un.set_feature, sig_freq);
  6117. bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
  6118. &mbox->u.mqe.un.set_feature, sig_freq);
  6119. }
  6120. if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
  6121. bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
  6122. &mbox->u.mqe.un.set_feature, sig_freq);
  6123. if (phba->cmf_active_mode == LPFC_CFG_OFF ||
  6124. phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
  6125. sig_freq = 0;
  6126. else
  6127. sig_freq = lpfc_acqe_cgn_frequency;
  6128. bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
  6129. &mbox->u.mqe.un.set_feature, sig_freq);
  6130. mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
  6131. mbox->u.mqe.un.set_feature.param_len = 12;
  6132. break;
  6133. case LPFC_SET_DUAL_DUMP:
  6134. bf_set(lpfc_mbx_set_feature_dd,
  6135. &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
  6136. bf_set(lpfc_mbx_set_feature_ddquery,
  6137. &mbox->u.mqe.un.set_feature, 0);
  6138. mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
  6139. mbox->u.mqe.un.set_feature.param_len = 4;
  6140. break;
  6141. case LPFC_SET_ENABLE_MI:
  6142. mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
  6143. mbox->u.mqe.un.set_feature.param_len = 4;
  6144. bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
  6145. phba->pport->cfg_lun_queue_depth);
  6146. bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
  6147. phba->sli4_hba.pc_sli4_params.mi_ver);
  6148. break;
  6149. case LPFC_SET_LD_SIGNAL:
  6150. mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
  6151. mbox->u.mqe.un.set_feature.param_len = 16;
  6152. bf_set(lpfc_mbx_set_feature_lds_qry,
  6153. &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
  6154. break;
  6155. case LPFC_SET_ENABLE_CMF:
  6156. mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
  6157. mbox->u.mqe.un.set_feature.param_len = 4;
  6158. bf_set(lpfc_mbx_set_feature_cmf,
  6159. &mbox->u.mqe.un.set_feature, 1);
  6160. break;
  6161. }
  6162. return;
  6163. }
  6164. /**
  6165. * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
  6166. * @phba: Pointer to HBA context object.
  6167. *
6168. * Disable FW logging into host memory on the adapter. This must be
6169. * done before reading the logs from host memory.
  6170. **/
  6171. void
  6172. lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
  6173. {
  6174. struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
  6175. spin_lock_irq(&phba->ras_fwlog_lock);
  6176. ras_fwlog->state = INACTIVE;
  6177. spin_unlock_irq(&phba->ras_fwlog_lock);
  6178. /* Disable FW logging to host memory */
  6179. writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
  6180. phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
  6181. /* Wait 10ms for firmware to stop using DMA buffer */
  6182. usleep_range(10 * 1000, 20 * 1000);
  6183. }
  6184. /**
  6185. * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
  6186. * @phba: Pointer to HBA context object.
  6187. *
  6188. * This function is called to free memory allocated for RAS FW logging
  6189. * support in the driver.
  6190. **/
  6191. void
  6192. lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
  6193. {
  6194. struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
  6195. struct lpfc_dmabuf *dmabuf, *next;
  6196. if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
  6197. list_for_each_entry_safe(dmabuf, next,
  6198. &ras_fwlog->fwlog_buff_list,
  6199. list) {
  6200. list_del(&dmabuf->list);
  6201. dma_free_coherent(&phba->pcidev->dev,
  6202. LPFC_RAS_MAX_ENTRY_SIZE,
  6203. dmabuf->virt, dmabuf->phys);
  6204. kfree(dmabuf);
  6205. }
  6206. }
  6207. if (ras_fwlog->lwpd.virt) {
  6208. dma_free_coherent(&phba->pcidev->dev,
  6209. sizeof(uint32_t) * 2,
  6210. ras_fwlog->lwpd.virt,
  6211. ras_fwlog->lwpd.phys);
  6212. ras_fwlog->lwpd.virt = NULL;
  6213. }
  6214. spin_lock_irq(&phba->ras_fwlog_lock);
  6215. ras_fwlog->state = INACTIVE;
  6216. spin_unlock_irq(&phba->ras_fwlog_lock);
  6217. }
  6218. /**
  6219. * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
  6220. * @phba: Pointer to HBA context object.
  6221. * @fwlog_buff_count: Count of buffers to be created.
  6222. *
6223. * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6224. * and for the buffers that are posted to the adapter for FW log updates.
6225. * The buffer count is calculated from the module parameter
6226. * ras_fwlog_buffsize; the size of each buffer posted to FW is 64K.
  6227. **/
  6228. static int
  6229. lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
  6230. uint32_t fwlog_buff_count)
  6231. {
  6232. struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
  6233. struct lpfc_dmabuf *dmabuf;
  6234. int rc = 0, i = 0;
  6235. /* Initialize List */
  6236. INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
  6237. /* Allocate memory for the LWPD */
  6238. ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
  6239. sizeof(uint32_t) * 2,
  6240. &ras_fwlog->lwpd.phys,
  6241. GFP_KERNEL);
  6242. if (!ras_fwlog->lwpd.virt) {
  6243. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6244. "6185 LWPD Memory Alloc Failed\n");
  6245. return -ENOMEM;
  6246. }
  6247. ras_fwlog->fw_buffcount = fwlog_buff_count;
  6248. for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
  6249. dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
  6250. GFP_KERNEL);
  6251. if (!dmabuf) {
  6252. rc = -ENOMEM;
  6253. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  6254. "6186 Memory Alloc failed FW logging");
  6255. goto free_mem;
  6256. }
  6257. dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
  6258. LPFC_RAS_MAX_ENTRY_SIZE,
  6259. &dmabuf->phys, GFP_KERNEL);
  6260. if (!dmabuf->virt) {
  6261. kfree(dmabuf);
  6262. rc = -ENOMEM;
  6263. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  6264. "6187 DMA Alloc Failed FW logging");
  6265. goto free_mem;
  6266. }
  6267. dmabuf->buffer_tag = i;
  6268. list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
  6269. }
  6270. free_mem:
  6271. if (rc)
  6272. lpfc_sli4_ras_dma_free(phba);
  6273. return rc;
  6274. }
  6275. /**
  6276. * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
  6277. * @phba: pointer to lpfc hba data structure.
  6278. * @pmb: pointer to the driver internal queue element for mailbox command.
  6279. *
  6280. * Completion handler for driver's RAS MBX command to the device.
  6281. **/
  6282. static void
  6283. lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
  6284. {
  6285. MAILBOX_t *mb;
  6286. union lpfc_sli4_cfg_shdr *shdr;
  6287. uint32_t shdr_status, shdr_add_status;
  6288. struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
  6289. mb = &pmb->u.mb;
  6290. shdr = (union lpfc_sli4_cfg_shdr *)
  6291. &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
  6292. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  6293. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  6294. if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
  6295. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6296. "6188 FW LOG mailbox "
  6297. "completed with status x%x add_status x%x,"
  6298. " mbx status x%x\n",
  6299. shdr_status, shdr_add_status, mb->mbxStatus);
  6300. ras_fwlog->ras_hwsupport = false;
  6301. goto disable_ras;
  6302. }
  6303. spin_lock_irq(&phba->ras_fwlog_lock);
  6304. ras_fwlog->state = ACTIVE;
  6305. spin_unlock_irq(&phba->ras_fwlog_lock);
  6306. mempool_free(pmb, phba->mbox_mem_pool);
  6307. return;
  6308. disable_ras:
  6309. /* Free RAS DMA memory */
  6310. lpfc_sli4_ras_dma_free(phba);
  6311. mempool_free(pmb, phba->mbox_mem_pool);
  6312. }
  6313. /**
  6314. * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
  6315. * @phba: pointer to lpfc hba data structure.
  6316. * @fwlog_level: Logging verbosity level.
  6317. * @fwlog_enable: Enable/Disable logging.
  6318. *
  6319. * Initialize memory and post mailbox command to enable FW logging in host
  6320. * memory.
  6321. **/
  6322. int
  6323. lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
  6324. uint32_t fwlog_level,
  6325. uint32_t fwlog_enable)
  6326. {
  6327. struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
  6328. struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
  6329. struct lpfc_dmabuf *dmabuf;
  6330. LPFC_MBOXQ_t *mbox;
  6331. uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
  6332. int rc = 0;
  6333. spin_lock_irq(&phba->ras_fwlog_lock);
  6334. ras_fwlog->state = INACTIVE;
  6335. spin_unlock_irq(&phba->ras_fwlog_lock);
  6336. fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
  6337. phba->cfg_ras_fwlog_buffsize);
  6338. fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
  6339. /*
6340. * If re-enabling FW logging support, use the earlier allocated
6341. * DMA buffers while posting the MBX command.
6342. */
  6343. if (!ras_fwlog->lwpd.virt) {
  6344. rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
  6345. if (rc) {
  6346. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  6347. "6189 FW Log Memory Allocation Failed");
  6348. return rc;
  6349. }
  6350. }
  6351. /* Setup Mailbox command */
  6352. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  6353. if (!mbox) {
  6354. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6355. "6190 RAS MBX Alloc Failed");
  6356. rc = -ENOMEM;
  6357. goto mem_free;
  6358. }
  6359. ras_fwlog->fw_loglevel = fwlog_level;
  6360. len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
  6361. sizeof(struct lpfc_sli4_cfg_mhdr));
  6362. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
  6363. LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
  6364. len, LPFC_SLI4_MBX_EMBED);
  6365. mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
  6366. bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
  6367. fwlog_enable);
  6368. bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
  6369. ras_fwlog->fw_loglevel);
  6370. bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
  6371. ras_fwlog->fw_buffcount);
  6372. bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
  6373. LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
  6374. /* Update DMA buffer address */
  6375. list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
  6376. memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
  6377. mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
  6378. putPaddrLow(dmabuf->phys);
  6379. mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
  6380. putPaddrHigh(dmabuf->phys);
  6381. }
6382. /* Update LWPD address */
  6383. mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
  6384. mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
  6385. spin_lock_irq(&phba->ras_fwlog_lock);
  6386. ras_fwlog->state = REG_INPROGRESS;
  6387. spin_unlock_irq(&phba->ras_fwlog_lock);
  6388. mbox->vport = phba->pport;
  6389. mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
  6390. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
  6391. if (rc == MBX_NOT_FINISHED) {
  6392. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6393. "6191 FW-Log Mailbox failed. "
  6394. "status %d mbxStatus : x%x", rc,
  6395. bf_get(lpfc_mqe_status, &mbox->u.mqe));
  6396. mempool_free(mbox, phba->mbox_mem_pool);
  6397. rc = -EIO;
  6398. goto mem_free;
  6399. } else
  6400. rc = 0;
  6401. mem_free:
  6402. if (rc)
  6403. lpfc_sli4_ras_dma_free(phba);
  6404. return rc;
  6405. }
  6406. /**
  6407. * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
  6408. * @phba: Pointer to HBA context object.
  6409. *
  6410. * Check if RAS is supported on the adapter and initialize it.
  6411. **/
  6412. void
  6413. lpfc_sli4_ras_setup(struct lpfc_hba *phba)
  6414. {
6415. /* Check whether RAS FW logging needs to be enabled */
  6416. if (lpfc_check_fwlog_support(phba))
  6417. return;
  6418. lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
  6419. LPFC_RAS_ENABLE_LOGGING);
  6420. }
  6421. /**
  6422. * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
  6423. * @phba: Pointer to HBA context object.
  6424. *
  6425. * This function allocates all SLI4 resource identifiers.
  6426. **/
  6427. int
  6428. lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
  6429. {
  6430. int i, rc, error = 0;
  6431. uint16_t count, base;
  6432. unsigned long longs;
  6433. if (!phba->sli4_hba.rpi_hdrs_in_use)
  6434. phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
  6435. if (phba->sli4_hba.extents_in_use) {
  6436. /*
  6437. * The port supports resource extents. The XRI, VPI, VFI, RPI
  6438. * resource extent count must be read and allocated before
  6439. * provisioning the resource id arrays.
  6440. */
  6441. if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
  6442. LPFC_IDX_RSRC_RDY) {
  6443. /*
  6444. * Extent-based resources are set - the driver could
  6445. * be in a port reset. Figure out if any corrective
  6446. * actions need to be taken.
  6447. */
  6448. rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
  6449. LPFC_RSC_TYPE_FCOE_VFI);
  6450. if (rc != 0)
  6451. error++;
  6452. rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
  6453. LPFC_RSC_TYPE_FCOE_VPI);
  6454. if (rc != 0)
  6455. error++;
  6456. rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
  6457. LPFC_RSC_TYPE_FCOE_XRI);
  6458. if (rc != 0)
  6459. error++;
  6460. rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
  6461. LPFC_RSC_TYPE_FCOE_RPI);
  6462. if (rc != 0)
  6463. error++;
  6464. /*
  6465. * It's possible that the number of resources
  6466. * provided to this port instance changed between
  6467. * resets. Detect this condition and reallocate
  6468. * resources. Otherwise, there is no action.
  6469. */
  6470. if (error) {
  6471. lpfc_printf_log(phba, KERN_INFO,
  6472. LOG_MBOX | LOG_INIT,
  6473. "2931 Detected extent resource "
  6474. "change. Reallocating all "
  6475. "extents.\n");
  6476. rc = lpfc_sli4_dealloc_extent(phba,
  6477. LPFC_RSC_TYPE_FCOE_VFI);
  6478. rc = lpfc_sli4_dealloc_extent(phba,
  6479. LPFC_RSC_TYPE_FCOE_VPI);
  6480. rc = lpfc_sli4_dealloc_extent(phba,
  6481. LPFC_RSC_TYPE_FCOE_XRI);
  6482. rc = lpfc_sli4_dealloc_extent(phba,
  6483. LPFC_RSC_TYPE_FCOE_RPI);
  6484. } else
  6485. return 0;
  6486. }
  6487. rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
  6488. if (unlikely(rc))
  6489. goto err_exit;
  6490. rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
  6491. if (unlikely(rc))
  6492. goto err_exit;
  6493. rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
  6494. if (unlikely(rc))
  6495. goto err_exit;
  6496. rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
  6497. if (unlikely(rc))
  6498. goto err_exit;
  6499. bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
  6500. LPFC_IDX_RSRC_RDY);
  6501. return rc;
  6502. } else {
  6503. /*
  6504. * The port does not support resource extents. The XRI, VPI,
  6505. * VFI, RPI resource ids were determined from READ_CONFIG.
  6506. * Just allocate the bitmasks and provision the resource id
  6507. * arrays. If a port reset is active, the resources don't
  6508. * need any action - just exit.
  6509. */
  6510. if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
  6511. LPFC_IDX_RSRC_RDY) {
  6512. lpfc_sli4_dealloc_resource_identifiers(phba);
  6513. lpfc_sli4_remove_rpis(phba);
  6514. }
  6515. /* RPIs. */
  6516. count = phba->sli4_hba.max_cfg_param.max_rpi;
  6517. if (count <= 0) {
  6518. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6519. "3279 Invalid provisioning of "
  6520. "rpi:%d\n", count);
  6521. rc = -EINVAL;
  6522. goto err_exit;
  6523. }
  6524. base = phba->sli4_hba.max_cfg_param.rpi_base;
  6525. longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
  6526. phba->sli4_hba.rpi_bmask = kcalloc(longs,
  6527. sizeof(unsigned long),
  6528. GFP_KERNEL);
  6529. if (unlikely(!phba->sli4_hba.rpi_bmask)) {
  6530. rc = -ENOMEM;
  6531. goto err_exit;
  6532. }
  6533. phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
  6534. GFP_KERNEL);
  6535. if (unlikely(!phba->sli4_hba.rpi_ids)) {
  6536. rc = -ENOMEM;
  6537. goto free_rpi_bmask;
  6538. }
  6539. for (i = 0; i < count; i++)
  6540. phba->sli4_hba.rpi_ids[i] = base + i;
  6541. /* VPIs. */
  6542. count = phba->sli4_hba.max_cfg_param.max_vpi;
  6543. if (count <= 0) {
  6544. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6545. "3280 Invalid provisioning of "
  6546. "vpi:%d\n", count);
  6547. rc = -EINVAL;
  6548. goto free_rpi_ids;
  6549. }
  6550. base = phba->sli4_hba.max_cfg_param.vpi_base;
  6551. longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
  6552. phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
  6553. GFP_KERNEL);
  6554. if (unlikely(!phba->vpi_bmask)) {
  6555. rc = -ENOMEM;
  6556. goto free_rpi_ids;
  6557. }
  6558. phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
  6559. GFP_KERNEL);
  6560. if (unlikely(!phba->vpi_ids)) {
  6561. rc = -ENOMEM;
  6562. goto free_vpi_bmask;
  6563. }
  6564. for (i = 0; i < count; i++)
  6565. phba->vpi_ids[i] = base + i;
  6566. /* XRIs. */
  6567. count = phba->sli4_hba.max_cfg_param.max_xri;
  6568. if (count <= 0) {
  6569. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6570. "3281 Invalid provisioning of "
  6571. "xri:%d\n", count);
  6572. rc = -EINVAL;
  6573. goto free_vpi_ids;
  6574. }
  6575. base = phba->sli4_hba.max_cfg_param.xri_base;
  6576. longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
  6577. phba->sli4_hba.xri_bmask = kcalloc(longs,
  6578. sizeof(unsigned long),
  6579. GFP_KERNEL);
  6580. if (unlikely(!phba->sli4_hba.xri_bmask)) {
  6581. rc = -ENOMEM;
  6582. goto free_vpi_ids;
  6583. }
  6584. phba->sli4_hba.max_cfg_param.xri_used = 0;
  6585. phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
  6586. GFP_KERNEL);
  6587. if (unlikely(!phba->sli4_hba.xri_ids)) {
  6588. rc = -ENOMEM;
  6589. goto free_xri_bmask;
  6590. }
  6591. for (i = 0; i < count; i++)
  6592. phba->sli4_hba.xri_ids[i] = base + i;
  6593. /* VFIs. */
  6594. count = phba->sli4_hba.max_cfg_param.max_vfi;
  6595. if (count <= 0) {
  6596. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6597. "3282 Invalid provisioning of "
  6598. "vfi:%d\n", count);
  6599. rc = -EINVAL;
  6600. goto free_xri_ids;
  6601. }
  6602. base = phba->sli4_hba.max_cfg_param.vfi_base;
  6603. longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
  6604. phba->sli4_hba.vfi_bmask = kcalloc(longs,
  6605. sizeof(unsigned long),
  6606. GFP_KERNEL);
  6607. if (unlikely(!phba->sli4_hba.vfi_bmask)) {
  6608. rc = -ENOMEM;
  6609. goto free_xri_ids;
  6610. }
  6611. phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
  6612. GFP_KERNEL);
  6613. if (unlikely(!phba->sli4_hba.vfi_ids)) {
  6614. rc = -ENOMEM;
  6615. goto free_vfi_bmask;
  6616. }
  6617. for (i = 0; i < count; i++)
  6618. phba->sli4_hba.vfi_ids[i] = base + i;
  6619. /*
  6620. * Mark all resources ready. An HBA reset doesn't need
  6621. * to reset the initialization.
  6622. */
  6623. bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
  6624. LPFC_IDX_RSRC_RDY);
  6625. return 0;
  6626. }
  6627. free_vfi_bmask:
  6628. kfree(phba->sli4_hba.vfi_bmask);
  6629. phba->sli4_hba.vfi_bmask = NULL;
  6630. free_xri_ids:
  6631. kfree(phba->sli4_hba.xri_ids);
  6632. phba->sli4_hba.xri_ids = NULL;
  6633. free_xri_bmask:
  6634. kfree(phba->sli4_hba.xri_bmask);
  6635. phba->sli4_hba.xri_bmask = NULL;
  6636. free_vpi_ids:
  6637. kfree(phba->vpi_ids);
  6638. phba->vpi_ids = NULL;
  6639. free_vpi_bmask:
  6640. kfree(phba->vpi_bmask);
  6641. phba->vpi_bmask = NULL;
  6642. free_rpi_ids:
  6643. kfree(phba->sli4_hba.rpi_ids);
  6644. phba->sli4_hba.rpi_ids = NULL;
  6645. free_rpi_bmask:
  6646. kfree(phba->sli4_hba.rpi_bmask);
  6647. phba->sli4_hba.rpi_bmask = NULL;
  6648. err_exit:
  6649. return rc;
  6650. }
  6651. /**
  6652. * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
  6653. * @phba: Pointer to HBA context object.
  6654. *
6655. * This function releases all SLI4 resource identifiers (VPI, RPI, XRI and
6656. * VFI) previously allocated by the driver.
  6657. **/
  6658. int
  6659. lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
  6660. {
  6661. if (phba->sli4_hba.extents_in_use) {
  6662. lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
  6663. lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
  6664. lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
  6665. lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
  6666. } else {
  6667. kfree(phba->vpi_bmask);
  6668. phba->sli4_hba.max_cfg_param.vpi_used = 0;
  6669. kfree(phba->vpi_ids);
  6670. bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
  6671. kfree(phba->sli4_hba.xri_bmask);
  6672. kfree(phba->sli4_hba.xri_ids);
  6673. kfree(phba->sli4_hba.vfi_bmask);
  6674. kfree(phba->sli4_hba.vfi_ids);
  6675. bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
  6676. bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
  6677. }
  6678. return 0;
  6679. }
  6680. /**
  6681. * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
  6682. * @phba: Pointer to HBA context object.
  6683. * @type: The resource extent type.
  6684. * @extnt_cnt: buffer to hold port extent count response
  6685. * @extnt_size: buffer to hold port extent size response.
  6686. *
  6687. * This function calls the port to read the host allocated extents
  6688. * for a particular type.
  6689. **/
  6690. int
  6691. lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
  6692. uint16_t *extnt_cnt, uint16_t *extnt_size)
  6693. {
  6694. bool emb;
  6695. int rc = 0;
  6696. uint16_t curr_blks = 0;
  6697. uint32_t req_len, emb_len;
  6698. uint32_t alloc_len, mbox_tmo;
  6699. struct list_head *blk_list_head;
  6700. struct lpfc_rsrc_blks *rsrc_blk;
  6701. LPFC_MBOXQ_t *mbox;
  6702. void *virtaddr = NULL;
  6703. struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
  6704. struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
  6705. union lpfc_sli4_cfg_shdr *shdr;
  6706. switch (type) {
  6707. case LPFC_RSC_TYPE_FCOE_VPI:
  6708. blk_list_head = &phba->lpfc_vpi_blk_list;
  6709. break;
  6710. case LPFC_RSC_TYPE_FCOE_XRI:
  6711. blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
  6712. break;
  6713. case LPFC_RSC_TYPE_FCOE_VFI:
  6714. blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
  6715. break;
  6716. case LPFC_RSC_TYPE_FCOE_RPI:
  6717. blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
  6718. break;
  6719. default:
  6720. return -EIO;
  6721. }
6722. /* Count the number of extents currently allocated for this type. */
  6723. list_for_each_entry(rsrc_blk, blk_list_head, list) {
  6724. if (curr_blks == 0) {
  6725. /*
  6726. * The GET_ALLOCATED mailbox does not return the size,
  6727. * just the count. The size should be just the size
  6728. * stored in the current allocated block and all sizes
  6729. * for an extent type are the same so set the return
  6730. * value now.
  6731. */
  6732. *extnt_size = rsrc_blk->rsrc_size;
  6733. }
  6734. curr_blks++;
  6735. }
  6736. /*
  6737. * Calculate the size of an embedded mailbox. The uint32_t
  6738. * accounts for extents-specific word.
  6739. */
  6740. emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
  6741. sizeof(uint32_t);
  6742. /*
  6743. * Presume the allocation and response will fit into an embedded
  6744. * mailbox. If not true, reconfigure to a non-embedded mailbox.
  6745. */
  6746. emb = LPFC_SLI4_MBX_EMBED;
  6747. req_len = emb_len;
  6748. if (req_len > emb_len) {
  6749. req_len = curr_blks * sizeof(uint16_t) +
  6750. sizeof(union lpfc_sli4_cfg_shdr) +
  6751. sizeof(uint32_t);
  6752. emb = LPFC_SLI4_MBX_NEMBED;
  6753. }
  6754. mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  6755. if (!mbox)
  6756. return -ENOMEM;
  6757. memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
  6758. alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  6759. LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
  6760. req_len, emb);
  6761. if (alloc_len < req_len) {
  6762. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6763. "2983 Allocated DMA memory size (x%x) is "
  6764. "less than the requested DMA memory "
  6765. "size (x%x)\n", alloc_len, req_len);
  6766. rc = -ENOMEM;
  6767. goto err_exit;
  6768. }
  6769. rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
  6770. if (unlikely(rc)) {
  6771. rc = -EIO;
  6772. goto err_exit;
  6773. }
  6774. if (!phba->sli4_hba.intr_enable)
  6775. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  6776. else {
  6777. mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  6778. rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  6779. }
  6780. if (unlikely(rc)) {
  6781. rc = -EIO;
  6782. goto err_exit;
  6783. }
  6784. /*
  6785. * Figure out where the response is located. Then get local pointers
6786. * to the response data. The port does not guarantee a response to
6787. * every extent count request, so update the local variable with the
  6788. * allocated count from the port.
  6789. */
  6790. if (emb == LPFC_SLI4_MBX_EMBED) {
  6791. rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
  6792. shdr = &rsrc_ext->header.cfg_shdr;
  6793. *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
  6794. } else {
  6795. virtaddr = mbox->sge_array->addr[0];
  6796. n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
  6797. shdr = &n_rsrc->cfg_shdr;
  6798. *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
  6799. }
  6800. if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
  6801. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6802. "2984 Failed to read allocated resources "
  6803. "for type %d - Status 0x%x Add'l Status 0x%x.\n",
  6804. type,
  6805. bf_get(lpfc_mbox_hdr_status, &shdr->response),
  6806. bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
  6807. rc = -EIO;
  6808. goto err_exit;
  6809. }
  6810. err_exit:
  6811. lpfc_sli4_mbox_cmd_free(phba, mbox);
  6812. return rc;
  6813. }
  6814. /**
  6815. * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
  6816. * @phba: pointer to lpfc hba data structure.
  6817. * @sgl_list: linked link of sgl buffers to post
  6818. * @cnt: number of linked list buffers
  6819. *
  6820. * This routine walks the list of buffers that have been allocated and
  6821. * repost them to the port by using SGL block post. This is needed after a
  6822. * pci_function_reset/warm_start or start. It attempts to construct blocks
  6823. * of buffer sgls which contains contiguous xris and uses the non-embedded
  6824. * SGL block post mailbox commands to post them to the port. For single
6825. * buffer sgl with a non-contiguous xri, if any, the embedded SGL post
6826. * mailbox command is used for posting.
  6827. *
  6828. * Returns: 0 = success, non-zero failure.
  6829. **/
  6830. static int
  6831. lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
  6832. struct list_head *sgl_list, int cnt)
  6833. {
  6834. struct lpfc_sglq *sglq_entry = NULL;
  6835. struct lpfc_sglq *sglq_entry_next = NULL;
  6836. struct lpfc_sglq *sglq_entry_first = NULL;
  6837. int status = 0, total_cnt;
  6838. int post_cnt = 0, num_posted = 0, block_cnt = 0;
  6839. int last_xritag = NO_XRI;
  6840. LIST_HEAD(prep_sgl_list);
  6841. LIST_HEAD(blck_sgl_list);
  6842. LIST_HEAD(allc_sgl_list);
  6843. LIST_HEAD(post_sgl_list);
  6844. LIST_HEAD(free_sgl_list);
  6845. spin_lock_irq(&phba->hbalock);
  6846. spin_lock(&phba->sli4_hba.sgl_list_lock);
  6847. list_splice_init(sgl_list, &allc_sgl_list);
  6848. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  6849. spin_unlock_irq(&phba->hbalock);
  6850. total_cnt = cnt;
  6851. list_for_each_entry_safe(sglq_entry, sglq_entry_next,
  6852. &allc_sgl_list, list) {
  6853. list_del_init(&sglq_entry->list);
  6854. block_cnt++;
  6855. if ((last_xritag != NO_XRI) &&
  6856. (sglq_entry->sli4_xritag != last_xritag + 1)) {
  6857. /* a hole in xri block, form a sgl posting block */
  6858. list_splice_init(&prep_sgl_list, &blck_sgl_list);
  6859. post_cnt = block_cnt - 1;
  6860. /* prepare list for next posting block */
  6861. list_add_tail(&sglq_entry->list, &prep_sgl_list);
  6862. block_cnt = 1;
  6863. } else {
  6864. /* prepare list for next posting block */
  6865. list_add_tail(&sglq_entry->list, &prep_sgl_list);
  6866. /* enough sgls for non-embed sgl mbox command */
  6867. if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
  6868. list_splice_init(&prep_sgl_list,
  6869. &blck_sgl_list);
  6870. post_cnt = block_cnt;
  6871. block_cnt = 0;
  6872. }
  6873. }
  6874. num_posted++;
  6875. /* keep track of last sgl's xritag */
  6876. last_xritag = sglq_entry->sli4_xritag;
  6877. /* end of repost sgl list condition for buffers */
  6878. if (num_posted == total_cnt) {
  6879. if (post_cnt == 0) {
  6880. list_splice_init(&prep_sgl_list,
  6881. &blck_sgl_list);
  6882. post_cnt = block_cnt;
  6883. } else if (block_cnt == 1) {
  6884. status = lpfc_sli4_post_sgl(phba,
  6885. sglq_entry->phys, 0,
  6886. sglq_entry->sli4_xritag);
  6887. if (!status) {
  6888. /* successful, put sgl to posted list */
  6889. list_add_tail(&sglq_entry->list,
  6890. &post_sgl_list);
  6891. } else {
  6892. /* Failure, put sgl to free list */
  6893. lpfc_printf_log(phba, KERN_WARNING,
  6894. LOG_SLI,
  6895. "3159 Failed to post "
  6896. "sgl, xritag:x%x\n",
  6897. sglq_entry->sli4_xritag);
  6898. list_add_tail(&sglq_entry->list,
  6899. &free_sgl_list);
  6900. total_cnt--;
  6901. }
  6902. }
  6903. }
  6904. /* continue until a nembed page worth of sgls */
  6905. if (post_cnt == 0)
  6906. continue;
  6907. /* post the buffer list sgls as a block */
  6908. status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
  6909. post_cnt);
  6910. if (!status) {
  6911. /* success, put sgl list to posted sgl list */
  6912. list_splice_init(&blck_sgl_list, &post_sgl_list);
  6913. } else {
  6914. /* Failure, put sgl list to free sgl list */
  6915. sglq_entry_first = list_first_entry(&blck_sgl_list,
  6916. struct lpfc_sglq,
  6917. list);
  6918. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  6919. "3160 Failed to post sgl-list, "
  6920. "xritag:x%x-x%x\n",
  6921. sglq_entry_first->sli4_xritag,
  6922. (sglq_entry_first->sli4_xritag +
  6923. post_cnt - 1));
  6924. list_splice_init(&blck_sgl_list, &free_sgl_list);
  6925. total_cnt -= post_cnt;
  6926. }
6927. /* don't reset xritag due to hole in xri block */
  6928. if (block_cnt == 0)
  6929. last_xritag = NO_XRI;
  6930. /* reset sgl post count for next round of posting */
  6931. post_cnt = 0;
  6932. }
  6933. /* free the sgls failed to post */
  6934. lpfc_free_sgl_list(phba, &free_sgl_list);
  6935. /* push sgls posted to the available list */
  6936. if (!list_empty(&post_sgl_list)) {
  6937. spin_lock_irq(&phba->hbalock);
  6938. spin_lock(&phba->sli4_hba.sgl_list_lock);
  6939. list_splice_init(&post_sgl_list, sgl_list);
  6940. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  6941. spin_unlock_irq(&phba->hbalock);
  6942. } else {
  6943. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  6944. "3161 Failure to post sgl to port,status %x "
  6945. "blkcnt %d totalcnt %d postcnt %d\n",
  6946. status, block_cnt, total_cnt, post_cnt);
  6947. return -EIO;
  6948. }
  6949. /* return the number of XRIs actually posted */
  6950. return total_cnt;
  6951. }
  6952. /**
  6953. * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
  6954. * @phba: pointer to lpfc hba data structure.
  6955. *
  6956. * This routine walks the list of nvme buffers that have been allocated and
  6957. * repost them to the port by using SGL block post. This is needed after a
  6958. * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
  6959. * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
  6960. * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
  6961. *
  6962. * Returns: 0 = success, non-zero failure.
  6963. **/
  6964. static int
  6965. lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
  6966. {
  6967. LIST_HEAD(post_nblist);
  6968. int num_posted, rc = 0;
  6969. /* get all NVME buffers need to repost to a local list */
  6970. lpfc_io_buf_flush(phba, &post_nblist);
  6971. /* post the list of nvme buffer sgls to port if available */
  6972. if (!list_empty(&post_nblist)) {
  6973. num_posted = lpfc_sli4_post_io_sgl_list(
  6974. phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
  6975. /* failed to post any nvme buffer, return error */
  6976. if (num_posted == 0)
  6977. rc = -EIO;
  6978. }
  6979. return rc;
  6980. }
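/**
 * lpfc_set_host_data - Prepare a SET_HOST_DATA mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the driver internal queue element for mailbox command.
 *
 * Builds a COMMON SET_HOST_DATA mailbox command that reports the host OS
 * (FC or FCoE mode) and driver version string to the adapter. The caller
 * is responsible for issuing the mailbox command.
 **/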
  6981. static void
  6982. lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
  6983. {
  6984. uint32_t len;
  6985. len = sizeof(struct lpfc_mbx_set_host_data) -
  6986. sizeof(struct lpfc_sli4_cfg_mhdr);
  6987. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  6988. LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
  6989. LPFC_SLI4_MBX_EMBED);
  6990. mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
  6991. mbox->u.mqe.un.set_host_data.param_len =
  6992. LPFC_HOST_OS_DRIVER_VERSION_SIZE;
  6993. snprintf(mbox->u.mqe.un.set_host_data.un.data,
  6994. LPFC_HOST_OS_DRIVER_VERSION_SIZE,
  6995. "Linux %s v"LPFC_DRIVER_VERSION,
  6996. test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? "FCoE" : "FC");
  6997. }
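/**
 * lpfc_post_rq_buffer - Post buffers to a header/data receive queue pair
 * @phba: pointer to lpfc hba data structure.
 * @hrq: pointer to the header receive queue.
 * @drq: pointer to the data receive queue.
 * @count: number of buffers to post.
 * @idx: queue index used to tag the posted buffers.
 *
 * Allocates receive buffers and posts them to the header and data RQ pair
 * until the requested count is reached or the RQ is full. Buffers that
 * cannot be posted are freed.
 *
 * Returns: 1 always; rqbp->buffer_count reflects how many were posted.
 **/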
  6998. int
  6999. lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
  7000. struct lpfc_queue *drq, int count, int idx)
  7001. {
  7002. int rc, i;
  7003. struct lpfc_rqe hrqe;
  7004. struct lpfc_rqe drqe;
  7005. struct lpfc_rqb *rqbp;
  7006. unsigned long flags;
  7007. struct rqb_dmabuf *rqb_buffer;
  7008. LIST_HEAD(rqb_buf_list);
  7009. rqbp = hrq->rqbp;
  7010. for (i = 0; i < count; i++) {
  7011. spin_lock_irqsave(&phba->hbalock, flags);
7012. /* If the RQ is already full, don't bother */
  7013. if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
  7014. spin_unlock_irqrestore(&phba->hbalock, flags);
  7015. break;
  7016. }
  7017. spin_unlock_irqrestore(&phba->hbalock, flags);
  7018. rqb_buffer = rqbp->rqb_alloc_buffer(phba);
  7019. if (!rqb_buffer)
  7020. break;
  7021. rqb_buffer->hrq = hrq;
  7022. rqb_buffer->drq = drq;
  7023. rqb_buffer->idx = idx;
  7024. list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
  7025. }
  7026. spin_lock_irqsave(&phba->hbalock, flags);
  7027. while (!list_empty(&rqb_buf_list)) {
  7028. list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
  7029. hbuf.list);
  7030. hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
  7031. hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
  7032. drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
  7033. drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
  7034. rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
  7035. if (rc < 0) {
  7036. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7037. "6421 Cannot post to HRQ %d: %x %x %x "
  7038. "DRQ %x %x\n",
  7039. hrq->queue_id,
  7040. hrq->host_index,
  7041. hrq->hba_index,
  7042. hrq->entry_count,
  7043. drq->host_index,
  7044. drq->hba_index);
  7045. rqbp->rqb_free_buffer(phba, rqb_buffer);
  7046. } else {
  7047. list_add_tail(&rqb_buffer->hbuf.list,
  7048. &rqbp->rqb_buffer_list);
  7049. rqbp->buffer_count++;
  7050. }
  7051. }
  7052. spin_unlock_irqrestore(&phba->hbalock, flags);
  7053. return 1;
  7054. }
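/**
 * lpfc_mbx_cmpl_read_lds_params - Completion handler for LD signal query
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * On success, saves the link degrade activate/deactivate thresholds and
 * the FEC degrade interval reported by the adapter; on failure, zeroes
 * them. The mailbox is freed before returning.
 **/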
  7055. static void
  7056. lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
  7057. {
  7058. union lpfc_sli4_cfg_shdr *shdr;
  7059. u32 shdr_status, shdr_add_status;
  7060. shdr = (union lpfc_sli4_cfg_shdr *)
  7061. &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
  7062. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  7063. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  7064. if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
  7065. lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
  7066. "4622 SET_FEATURE (x%x) mbox failed, "
  7067. "status x%x add_status x%x, mbx status x%x\n",
  7068. LPFC_SET_LD_SIGNAL, shdr_status,
  7069. shdr_add_status, pmb->u.mb.mbxStatus);
  7070. phba->degrade_activate_threshold = 0;
  7071. phba->degrade_deactivate_threshold = 0;
  7072. phba->fec_degrade_interval = 0;
  7073. goto out;
  7074. }
  7075. phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
  7076. phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
  7077. phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
  7078. lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
  7079. "4624 Success: da x%x dd x%x interval x%x\n",
  7080. phba->degrade_activate_threshold,
  7081. phba->degrade_deactivate_threshold,
  7082. phba->fec_degrade_interval);
  7083. out:
  7084. mempool_free(pmb, phba->mbox_mem_pool);
  7085. }
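/**
 * lpfc_read_lds_params - Query link degrade signaling parameters
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a SET_FEATURES (LD signal query) mailbox command with
 * lpfc_mbx_cmpl_read_lds_params as the completion handler.
 *
 * Returns: 0 on success, -ENOMEM or -EIO on failure.
 **/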
  7086. int
  7087. lpfc_read_lds_params(struct lpfc_hba *phba)
  7088. {
  7089. LPFC_MBOXQ_t *mboxq;
  7090. int rc;
  7091. mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  7092. if (!mboxq)
  7093. return -ENOMEM;
  7094. lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
  7095. mboxq->vport = phba->pport;
  7096. mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
  7097. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  7098. if (rc == MBX_NOT_FINISHED) {
  7099. mempool_free(mboxq, phba->mbox_mem_pool);
  7100. return -EIO;
  7101. }
  7102. return 0;
  7103. }
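/**
 * lpfc_mbx_cmpl_cgn_set_ftrs - Completion handler for CGN SET_FEATURES
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Records the negotiated congestion signal frequencies on success, or
 * falls back to FPIN-only congestion reporting on failure, then issues an
 * RDF ELS to register for fabric FPIN events.
 **/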
  7104. static void
  7105. lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
  7106. {
  7107. struct lpfc_vport *vport = pmb->vport;
  7108. union lpfc_sli4_cfg_shdr *shdr;
  7109. u32 shdr_status, shdr_add_status;
  7110. u32 sig, acqe;
7111. /* Two outcomes: (1) Set features was successful and EDC negotiation
7112. * is done. (2) The mailbox failed, so fall back to FPIN support only.
7113. */
  7114. shdr = (union lpfc_sli4_cfg_shdr *)
  7115. &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
  7116. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  7117. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  7118. if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
  7119. lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
  7120. "2516 CGN SET_FEATURE mbox failed with "
  7121. "status x%x add_status x%x, mbx status x%x "
  7122. "Reset Congestion to FPINs only\n",
  7123. shdr_status, shdr_add_status,
  7124. pmb->u.mb.mbxStatus);
  7125. /* If there is a mbox error, move on to RDF */
  7126. phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
  7127. phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
  7128. goto out;
  7129. }
  7130. /* Zero out Congestion Signal ACQE counter */
  7131. phba->cgn_acqe_cnt = 0;
  7132. acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
  7133. &pmb->u.mqe.un.set_feature);
  7134. sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
  7135. &pmb->u.mqe.un.set_feature);
  7136. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  7137. "4620 SET_FEATURES Success: Freq: %ds %dms "
  7138. " Reg: x%x x%x\n", acqe, sig,
  7139. phba->cgn_reg_signal, phba->cgn_reg_fpin);
  7140. out:
  7141. mempool_free(pmb, phba->mbox_mem_pool);
  7142. /* Register for FPIN events from the fabric now that the
  7143. * EDC common_set_features has completed.
  7144. */
  7145. lpfc_issue_els_rdf(vport, 0);
  7146. }
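/**
 * lpfc_config_cgn_signal - Configure congestion signaling with the adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a SET_FEATURES (CGN signal) mailbox command. If the mailbox
 * cannot be allocated or issued, congestion reporting is reset to FPINs
 * only and an RDF ELS is sent directly.
 *
 * Returns: 0 on success, -EIO on failure.
 **/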
  7147. int
  7148. lpfc_config_cgn_signal(struct lpfc_hba *phba)
  7149. {
  7150. LPFC_MBOXQ_t *mboxq;
  7151. u32 rc;
  7152. mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  7153. if (!mboxq)
  7154. goto out_rdf;
  7155. lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
  7156. mboxq->vport = phba->pport;
  7157. mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
  7158. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  7159. "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
  7160. "Reg: x%x x%x\n",
  7161. phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
  7162. phba->cgn_reg_signal, phba->cgn_reg_fpin);
  7163. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  7164. if (rc == MBX_NOT_FINISHED)
  7165. goto out;
  7166. return 0;
  7167. out:
  7168. mempool_free(mboxq, phba->mbox_mem_pool);
  7169. out_rdf:
  7170. /* If there is a mbox error, move on to RDF */
  7171. phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
  7172. phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
  7173. lpfc_issue_els_rdf(phba->pport, 0);
  7174. return -EIO;
  7175. }
  7176. /**
  7177. * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
  7178. * @phba: pointer to lpfc hba data structure.
  7179. *
  7180. * This routine initializes the per-eq idle_stat to dynamically dictate
  7181. * polling decisions.
  7182. *
  7183. * Return codes:
  7184. * None
  7185. **/
  7186. static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
  7187. {
  7188. int i;
  7189. struct lpfc_sli4_hdw_queue *hdwq;
  7190. struct lpfc_queue *eq;
  7191. struct lpfc_idle_stat *idle_stat;
  7192. u64 wall;
  7193. for_each_present_cpu(i) {
  7194. hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
  7195. eq = hdwq->hba_eq;
  7196. /* Skip if we've already handled this eq's primary CPU */
  7197. if (eq->chann != i)
  7198. continue;
  7199. idle_stat = &phba->sli4_hba.idle_stat[i];
  7200. idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
  7201. idle_stat->prev_wall = wall;
  7202. if (phba->nvmet_support ||
  7203. phba->cmf_active_mode != LPFC_CFG_OFF ||
  7204. phba->intr_type != MSIX)
  7205. eq->poll_mode = LPFC_QUEUE_WORK;
  7206. else
  7207. eq->poll_mode = LPFC_THREADED_IRQ;
  7208. }
  7209. if (!phba->nvmet_support && phba->intr_type == MSIX)
  7210. schedule_delayed_work(&phba->idle_stat_delay_work,
  7211. msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
  7212. }
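/**
 * lpfc_sli4_dip - Log presence of a firmware dump image
 * @phba: Pointer to HBA context object.
 *
 * For if_type 2 and 6 ports, reads the SLIPORT status register and logs a
 * message if a firmware dump image is present on the adapter.
 **/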
  7213. static void lpfc_sli4_dip(struct lpfc_hba *phba)
  7214. {
  7215. uint32_t if_type;
  7216. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  7217. if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
  7218. if_type == LPFC_SLI_INTF_IF_TYPE_6) {
  7219. struct lpfc_register reg_data;
  7220. if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
  7221. &reg_data.word0))
  7222. return;
  7223. if (bf_get(lpfc_sliport_status_dip, &reg_data))
  7224. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  7225. "2904 Firmware Dump Image Present"
  7226. " on Adapter");
  7227. }
  7228. }
  7229. /**
  7230. * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
  7231. * @rx_monitor: Pointer to lpfc_rx_info_monitor object
  7232. * @entries: Number of rx_info_entry objects to allocate in ring
  7233. *
  7234. * Return:
7235. * 0 - Success
7236. * -ENOMEM - Failure to allocate the ring buffer
  7237. **/
  7238. int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
  7239. u32 entries)
  7240. {
  7241. rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
  7242. GFP_KERNEL);
  7243. if (!rx_monitor->ring)
  7244. return -ENOMEM;
  7245. rx_monitor->head_idx = 0;
  7246. rx_monitor->tail_idx = 0;
  7247. spin_lock_init(&rx_monitor->lock);
  7248. rx_monitor->entries = entries;
  7249. return 0;
  7250. }
  7251. /**
  7252. * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
  7253. * @rx_monitor: Pointer to lpfc_rx_info_monitor object
  7254. *
  7255. * Called after cancellation of cmf_timer.
  7256. **/
  7257. void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
  7258. {
  7259. kfree(rx_monitor->ring);
  7260. rx_monitor->ring = NULL;
  7261. rx_monitor->entries = 0;
  7262. rx_monitor->head_idx = 0;
  7263. rx_monitor->tail_idx = 0;
  7264. }
  7265. /**
  7266. * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
  7267. * @rx_monitor: Pointer to lpfc_rx_info_monitor object
  7268. * @entry: Pointer to rx_info_entry
  7269. *
  7270. * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
7271. * deep copy of the rx_info_entry, not a shallow copy of the rx_info_entry ptr.
  7272. *
  7273. * This is called from lpfc_cmf_timer, which is in timer/softirq context.
  7274. *
  7275. * In cases of old data overflow, we do a best effort of FIFO order.
  7276. **/
  7277. void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
  7278. struct rx_info_entry *entry)
  7279. {
  7280. struct rx_info_entry *ring = rx_monitor->ring;
  7281. u32 *head_idx = &rx_monitor->head_idx;
  7282. u32 *tail_idx = &rx_monitor->tail_idx;
  7283. spinlock_t *ring_lock = &rx_monitor->lock;
  7284. u32 ring_size = rx_monitor->entries;
  7285. spin_lock(ring_lock);
  7286. memcpy(&ring[*tail_idx], entry, sizeof(*entry));
  7287. *tail_idx = (*tail_idx + 1) % ring_size;
  7288. /* Best effort of FIFO saved data */
  7289. if (*tail_idx == *head_idx)
  7290. *head_idx = (*head_idx + 1) % ring_size;
  7291. spin_unlock(ring_lock);
  7292. }
  7293. /**
  7294. * lpfc_rx_monitor_report - Read out rx_monitor's ring
  7295. * @phba: Pointer to lpfc_hba object
  7296. * @rx_monitor: Pointer to lpfc_rx_info_monitor object
  7297. * @buf: Pointer to char buffer that will contain rx monitor info data
7298. * @buf_len: Length of buf, including the null char
  7299. * @max_read_entries: Maximum number of entries to read out of ring
  7300. *
  7301. * Used to dump/read what's in rx_monitor's ring buffer.
  7302. *
  7303. * If buf is NULL || buf_len == 0, then it is implied that we want to log the
  7304. * information to kmsg instead of filling out buf.
  7305. *
  7306. * Return:
  7307. * Number of entries read out of the ring
  7308. **/
  7309. u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
  7310. struct lpfc_rx_info_monitor *rx_monitor, char *buf,
  7311. u32 buf_len, u32 max_read_entries)
  7312. {
  7313. struct rx_info_entry *ring = rx_monitor->ring;
  7314. struct rx_info_entry *entry;
  7315. u32 *head_idx = &rx_monitor->head_idx;
  7316. u32 *tail_idx = &rx_monitor->tail_idx;
  7317. spinlock_t *ring_lock = &rx_monitor->lock;
  7318. u32 ring_size = rx_monitor->entries;
  7319. u32 cnt = 0;
  7320. char tmp[DBG_LOG_STR_SZ] = {0};
  7321. bool log_to_kmsg = (!buf || !buf_len) ? true : false;
  7322. if (!log_to_kmsg) {
  7323. /* clear the buffer to be sure */
  7324. memset(buf, 0, buf_len);
  7325. scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
  7326. "%-8s%-8s%-8s%-16s\n",
  7327. "MaxBPI", "Tot_Data_CMF",
  7328. "Tot_Data_Cmd", "Tot_Data_Cmpl",
  7329. "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
  7330. "IO_cnt", "Info", "BWutil(ms)");
  7331. }
  7332. /* Needs to be _irq because record is called from timer interrupt
  7333. * context
  7334. */
  7335. spin_lock_irq(ring_lock);
  7336. while (*head_idx != *tail_idx) {
  7337. entry = &ring[*head_idx];
  7338. /* Read out this entry's data. */
  7339. if (!log_to_kmsg) {
  7340. /* If !log_to_kmsg, then store to buf. */
  7341. scnprintf(tmp, sizeof(tmp),
  7342. "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
  7343. "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
  7344. *head_idx, entry->max_bytes_per_interval,
  7345. entry->cmf_bytes, entry->total_bytes,
  7346. entry->rcv_bytes, entry->avg_io_latency,
  7347. entry->avg_io_size, entry->max_read_cnt,
  7348. entry->cmf_busy, entry->io_cnt,
  7349. entry->cmf_info, entry->timer_utilization,
  7350. entry->timer_interval);
  7351. /* Check for buffer overflow */
  7352. if ((strlen(buf) + strlen(tmp)) >= buf_len)
  7353. break;
  7354. /* Append entry's data to buffer */
  7355. strlcat(buf, tmp, buf_len);
  7356. } else {
  7357. lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
  7358. "4410 %02u: MBPI %llu Xmit %llu "
  7359. "Cmpl %llu Lat %llu ASz %llu Info %02u "
  7360. "BWUtil %u Int %u slot %u\n",
  7361. cnt, entry->max_bytes_per_interval,
  7362. entry->total_bytes, entry->rcv_bytes,
  7363. entry->avg_io_latency,
  7364. entry->avg_io_size, entry->cmf_info,
  7365. entry->timer_utilization,
  7366. entry->timer_interval, *head_idx);
  7367. }
  7368. *head_idx = (*head_idx + 1) % ring_size;
  7369. /* Don't feed more than max_read_entries */
  7370. cnt++;
  7371. if (cnt >= max_read_entries)
  7372. break;
  7373. }
  7374. spin_unlock_irq(ring_lock);
  7375. return cnt;
  7376. }
  7377. /**
7378. * lpfc_cmf_setup - Initialize CMF and MI support
  7379. * @phba: Pointer to HBA context object.
  7380. *
  7381. * This is called from HBA setup during driver load or when the HBA
7382. * comes online. This does all the initialization to support CMF and MI.
  7383. **/
  7384. static int
  7385. lpfc_cmf_setup(struct lpfc_hba *phba)
  7386. {
  7387. LPFC_MBOXQ_t *mboxq;
  7388. struct lpfc_dmabuf *mp;
  7389. struct lpfc_pc_sli4_params *sli4_params;
  7390. int rc, cmf, mi_ver;
  7391. rc = lpfc_sli4_refresh_params(phba);
  7392. if (unlikely(rc))
  7393. return rc;
  7394. mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  7395. if (!mboxq)
  7396. return -ENOMEM;
  7397. sli4_params = &phba->sli4_hba.pc_sli4_params;
  7398. /* Always try to enable MI feature if we can */
  7399. if (sli4_params->mi_ver) {
  7400. lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
  7401. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7402. mi_ver = bf_get(lpfc_mbx_set_feature_mi,
  7403. &mboxq->u.mqe.un.set_feature);
  7404. if (rc == MBX_SUCCESS) {
  7405. if (mi_ver) {
  7406. lpfc_printf_log(phba,
  7407. KERN_WARNING, LOG_CGN_MGMT,
  7408. "6215 MI is enabled\n");
  7409. sli4_params->mi_ver = mi_ver;
  7410. } else {
  7411. lpfc_printf_log(phba,
  7412. KERN_WARNING, LOG_CGN_MGMT,
  7413. "6338 MI is disabled\n");
  7414. sli4_params->mi_ver = 0;
  7415. }
  7416. } else {
  7417. /* mi_ver is already set from GET_SLI4_PARAMETERS */
  7418. lpfc_printf_log(phba, KERN_INFO,
  7419. LOG_CGN_MGMT | LOG_INIT,
  7420. "6245 Enable MI Mailbox x%x (x%x/x%x) "
  7421. "failed, rc:x%x mi:x%x\n",
  7422. bf_get(lpfc_mqe_command, &mboxq->u.mqe),
  7423. lpfc_sli_config_mbox_subsys_get
  7424. (phba, mboxq),
  7425. lpfc_sli_config_mbox_opcode_get
  7426. (phba, mboxq),
  7427. rc, sli4_params->mi_ver);
  7428. }
  7429. } else {
  7430. lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
  7431. "6217 MI is disabled\n");
  7432. }
  7433. /* Ensure FDMI is enabled for MI if enable_mi is set */
  7434. if (sli4_params->mi_ver)
  7435. phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
  7436. /* Always try to enable CMF feature if we can */
  7437. if (sli4_params->cmf) {
  7438. lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
  7439. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7440. cmf = bf_get(lpfc_mbx_set_feature_cmf,
  7441. &mboxq->u.mqe.un.set_feature);
  7442. if (rc == MBX_SUCCESS && cmf) {
  7443. lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
  7444. "6218 CMF is enabled: mode %d\n",
  7445. phba->cmf_active_mode);
  7446. } else {
  7447. lpfc_printf_log(phba, KERN_WARNING,
  7448. LOG_CGN_MGMT | LOG_INIT,
  7449. "6219 Enable CMF Mailbox x%x (x%x/x%x) "
  7450. "failed, rc:x%x dd:x%x\n",
  7451. bf_get(lpfc_mqe_command, &mboxq->u.mqe),
  7452. lpfc_sli_config_mbox_subsys_get
  7453. (phba, mboxq),
  7454. lpfc_sli_config_mbox_opcode_get
  7455. (phba, mboxq),
  7456. rc, cmf);
  7457. sli4_params->cmf = 0;
  7458. phba->cmf_active_mode = LPFC_CFG_OFF;
  7459. goto no_cmf;
  7460. }
  7461. /* Allocate Congestion Information Buffer */
  7462. if (!phba->cgn_i) {
  7463. mp = kmalloc(sizeof(*mp), GFP_KERNEL);
  7464. if (mp)
  7465. mp->virt = dma_alloc_coherent
  7466. (&phba->pcidev->dev,
  7467. sizeof(struct lpfc_cgn_info),
  7468. &mp->phys, GFP_KERNEL);
  7469. if (!mp || !mp->virt) {
  7470. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7471. "2640 Failed to alloc memory "
  7472. "for Congestion Info\n");
  7473. kfree(mp);
  7474. sli4_params->cmf = 0;
  7475. phba->cmf_active_mode = LPFC_CFG_OFF;
  7476. goto no_cmf;
  7477. }
  7478. phba->cgn_i = mp;
  7479. /* initialize congestion buffer info */
  7480. lpfc_init_congestion_buf(phba);
  7481. lpfc_init_congestion_stat(phba);
  7482. /* Zero out Congestion Signal counters */
  7483. atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
  7484. atomic64_set(&phba->cgn_acqe_stat.warn, 0);
  7485. }
  7486. rc = lpfc_sli4_cgn_params_read(phba);
  7487. if (rc < 0) {
  7488. lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
  7489. "6242 Error reading Cgn Params (%d)\n",
  7490. rc);
  7491. /* Ensure CGN Mode is off */
  7492. sli4_params->cmf = 0;
  7493. } else if (!rc) {
  7494. lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
  7495. "6243 CGN Event empty object.\n");
  7496. /* Ensure CGN Mode is off */
  7497. sli4_params->cmf = 0;
  7498. }
  7499. } else {
  7500. no_cmf:
  7501. lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
  7502. "6220 CMF is disabled\n");
  7503. }
  7504. /* Only register congestion buffer with firmware if BOTH
  7505. * CMF and E2E are enabled.
  7506. */
  7507. if (sli4_params->cmf && sli4_params->mi_ver) {
  7508. rc = lpfc_reg_congestion_buf(phba);
  7509. if (rc) {
  7510. dma_free_coherent(&phba->pcidev->dev,
  7511. sizeof(struct lpfc_cgn_info),
  7512. phba->cgn_i->virt, phba->cgn_i->phys);
  7513. kfree(phba->cgn_i);
  7514. phba->cgn_i = NULL;
  7515. /* Ensure CGN Mode is off */
  7516. phba->cmf_active_mode = LPFC_CFG_OFF;
  7517. sli4_params->cmf = 0;
  7518. return 0;
  7519. }
  7520. }
  7521. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  7522. "6470 Setup MI version %d CMF %d mode %d\n",
  7523. sli4_params->mi_ver, sli4_params->cmf,
  7524. phba->cmf_active_mode);
  7525. mempool_free(mboxq, phba->mbox_mem_pool);
  7526. /* Initialize atomic counters */
  7527. atomic_set(&phba->cgn_fabric_warn_cnt, 0);
  7528. atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
  7529. atomic_set(&phba->cgn_sync_alarm_cnt, 0);
  7530. atomic_set(&phba->cgn_sync_warn_cnt, 0);
  7531. atomic_set(&phba->cgn_driver_evt_cnt, 0);
  7532. atomic_set(&phba->cgn_latency_evt_cnt, 0);
  7533. atomic64_set(&phba->cgn_latency_evt, 0);
  7534. phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
  7535. /* Allocate RX Monitor Buffer */
  7536. if (!phba->rx_monitor) {
  7537. phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
  7538. GFP_KERNEL);
  7539. if (!phba->rx_monitor) {
  7540. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7541. "2644 Failed to alloc memory "
  7542. "for RX Monitor Buffer\n");
  7543. return -ENOMEM;
  7544. }
  7545. /* Instruct the rx_monitor object to instantiate its ring */
  7546. if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
  7547. LPFC_MAX_RXMONITOR_ENTRY)) {
  7548. kfree(phba->rx_monitor);
  7549. phba->rx_monitor = NULL;
  7550. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7551. "2645 Failed to alloc memory "
  7552. "for RX Monitor's Ring\n");
  7553. return -ENOMEM;
  7554. }
  7555. }
  7556. return 0;
  7557. }
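/**
 * lpfc_set_host_tm - Report the current host date and time to the adapter
 * @phba: Pointer to HBA context object.
 *
 * Builds and issues a SET_HOST_DATA mailbox command that carries the
 * current host date and time (year as an offset from 2000).
 *
 * Returns: status of the polled mailbox command, or -ENOMEM if the
 * mailbox allocation fails.
 **/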
  7558. static int
  7559. lpfc_set_host_tm(struct lpfc_hba *phba)
  7560. {
  7561. LPFC_MBOXQ_t *mboxq;
  7562. uint32_t len, rc;
  7563. struct timespec64 cur_time;
  7564. struct tm broken;
  7565. uint32_t month, day, year;
  7566. uint32_t hour, minute, second;
  7567. struct lpfc_mbx_set_host_date_time *tm;
  7568. mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  7569. if (!mboxq)
  7570. return -ENOMEM;
  7571. len = sizeof(struct lpfc_mbx_set_host_data) -
  7572. sizeof(struct lpfc_sli4_cfg_mhdr);
  7573. lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
  7574. LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
  7575. LPFC_SLI4_MBX_EMBED);
  7576. mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
  7577. mboxq->u.mqe.un.set_host_data.param_len =
  7578. sizeof(struct lpfc_mbx_set_host_date_time);
  7579. tm = &mboxq->u.mqe.un.set_host_data.un.tm;
  7580. ktime_get_real_ts64(&cur_time);
  7581. time64_to_tm(cur_time.tv_sec, 0, &broken);
  7582. month = broken.tm_mon + 1;
  7583. day = broken.tm_mday;
  7584. year = broken.tm_year - 100;
  7585. hour = broken.tm_hour;
  7586. minute = broken.tm_min;
  7587. second = broken.tm_sec;
  7588. bf_set(lpfc_mbx_set_host_month, tm, month);
  7589. bf_set(lpfc_mbx_set_host_day, tm, day);
  7590. bf_set(lpfc_mbx_set_host_year, tm, year);
  7591. bf_set(lpfc_mbx_set_host_hour, tm, hour);
  7592. bf_set(lpfc_mbx_set_host_min, tm, minute);
  7593. bf_set(lpfc_mbx_set_host_sec, tm, second);
  7594. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7595. mempool_free(mboxq, phba->mbox_mem_pool);
  7596. return rc;
  7597. }
  7598. /**
  7599. * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  7600. * @phba: Pointer to HBA context object.
  7601. *
  7602. * This function is the main SLI4 device initialization PCI function. This
  7603. * function is called by the HBA initialization code, HBA reset code and
  7604. * HBA error attention handler code. Caller is not required to hold any
  7605. * locks.
  7606. **/
  7607. int
  7608. lpfc_sli4_hba_setup(struct lpfc_hba *phba)
  7609. {
  7610. int rc, i, cnt, len, dd;
  7611. LPFC_MBOXQ_t *mboxq;
  7612. struct lpfc_mqe *mqe;
  7613. uint8_t *vpd;
  7614. uint32_t vpd_size;
  7615. uint32_t ftr_rsp = 0;
  7616. struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
  7617. struct lpfc_vport *vport = phba->pport;
  7618. struct lpfc_dmabuf *mp;
  7619. struct lpfc_rqb *rqbp;
  7620. u32 flg;
  7621. /* Perform a PCI function reset to start from clean */
  7622. rc = lpfc_pci_function_reset(phba);
  7623. if (unlikely(rc))
  7624. return -ENODEV;
7625. /* Check the HBA Host Status Register for readiness */
  7626. rc = lpfc_sli4_post_status_check(phba);
  7627. if (unlikely(rc))
  7628. return -ENODEV;
  7629. else {
  7630. spin_lock_irq(&phba->hbalock);
  7631. phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
  7632. flg = phba->sli.sli_flag;
  7633. spin_unlock_irq(&phba->hbalock);
  7634. /* Allow a little time after setting SLI_ACTIVE for any polled
  7635. * MBX commands to complete via BSG.
  7636. */
  7637. for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
  7638. msleep(20);
  7639. spin_lock_irq(&phba->hbalock);
  7640. flg = phba->sli.sli_flag;
  7641. spin_unlock_irq(&phba->hbalock);
  7642. }
  7643. }
  7644. clear_bit(HBA_SETUP, &phba->hba_flag);
  7645. lpfc_sli4_dip(phba);
  7646. /*
  7647. * Allocate a single mailbox container for initializing the
  7648. * port.
  7649. */
  7650. mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  7651. if (!mboxq)
  7652. return -ENOMEM;
  7653. /* Issue READ_REV to collect vpd and FW information. */
  7654. vpd_size = SLI4_PAGE_SIZE;
  7655. vpd = kzalloc(vpd_size, GFP_KERNEL);
  7656. if (!vpd) {
  7657. rc = -ENOMEM;
  7658. goto out_free_mbox;
  7659. }
  7660. rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
  7661. if (unlikely(rc)) {
  7662. kfree(vpd);
  7663. goto out_free_mbox;
  7664. }
  7665. mqe = &mboxq->u.mqe;
  7666. phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
  7667. if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
  7668. set_bit(HBA_FCOE_MODE, &phba->hba_flag);
  7669. phba->fcp_embed_io = 0; /* SLI4 FC support only */
  7670. } else {
  7671. clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
  7672. }
  7673. if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
  7674. LPFC_DCBX_CEE_MODE)
  7675. set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
  7676. else
  7677. clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
  7678. clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
  7679. if (phba->sli_rev != LPFC_SLI_REV4) {
  7680. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7681. "0376 READ_REV Error. SLI Level %d "
  7682. "FCoE enabled %d\n",
  7683. phba->sli_rev,
  7684. test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0);
  7685. rc = -EIO;
  7686. kfree(vpd);
  7687. goto out_free_mbox;
  7688. }
  7689. rc = lpfc_set_host_tm(phba);
  7690. lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
  7691. "6468 Set host date / time: Status x%x:\n", rc);
  7692. /*
7693. * Continue initialization with default values even if the driver failed
7694. * to read the FCoE param config regions; only read the parameters if
7695. * the board is FCoE.
  7696. */
  7697. if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
  7698. lpfc_sli4_read_fcoe_params(phba))
  7699. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
  7700. "2570 Failed to read FCoE parameters\n");
  7701. /*
7702. * Retrieve the sli4 device physical port name; a failure here is
7703. * considered non-fatal.
  7704. */
  7705. rc = lpfc_sli4_retrieve_pport_name(phba);
  7706. if (!rc)
  7707. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  7708. "3080 Successful retrieving SLI4 device "
  7709. "physical port name: %s.\n", phba->Port);
  7710. rc = lpfc_sli4_get_ctl_attr(phba);
  7711. if (!rc)
  7712. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  7713. "8351 Successful retrieving SLI4 device "
  7714. "CTL ATTR\n");
  7715. /*
  7716. * Evaluate the read rev and vpd data. Populate the driver
  7717. * state with the results. If this routine fails, the failure
  7718. * is not fatal as the driver will use generic values.
  7719. */
  7720. rc = lpfc_parse_vpd(phba, vpd, vpd_size);
  7721. if (unlikely(!rc))
  7722. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7723. "0377 Error %d parsing vpd. "
  7724. "Using defaults.\n", rc);
  7725. kfree(vpd);
  7726. /* Save information as VPD data */
  7727. phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
  7728. phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
  7729. /*
7730. * This is because the first G7 ASIC doesn't support the standard
  7731. * 0x5a NVME cmd descriptor type/subtype
  7732. */
  7733. if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
  7734. LPFC_SLI_INTF_IF_TYPE_6) &&
  7735. (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
  7736. (phba->vpd.rev.smRev == 0) &&
  7737. (phba->cfg_nvme_embed_cmd == 1))
  7738. phba->cfg_nvme_embed_cmd = 0;
  7739. phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
  7740. phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
  7741. &mqe->un.read_rev);
  7742. phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
  7743. &mqe->un.read_rev);
  7744. phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
  7745. &mqe->un.read_rev);
  7746. phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
  7747. &mqe->un.read_rev);
  7748. phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
  7749. memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
  7750. phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
  7751. memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
  7752. phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
  7753. memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
  7754. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  7755. "(%d):0380 READ_REV Status x%x "
  7756. "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
  7757. mboxq->vport ? mboxq->vport->vpi : 0,
  7758. bf_get(lpfc_mqe_status, mqe),
  7759. phba->vpd.rev.opFwName,
  7760. phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
  7761. phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
  7762. if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
  7763. LPFC_SLI_INTF_IF_TYPE_0) {
  7764. lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
  7765. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7766. if (rc == MBX_SUCCESS) {
  7767. set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag);
  7768. /* Set 1Sec interval to detect UE */
  7769. phba->eratt_poll_interval = 1;
  7770. phba->sli4_hba.ue_to_sr = bf_get(
  7771. lpfc_mbx_set_feature_UESR,
  7772. &mboxq->u.mqe.un.set_feature);
  7773. phba->sli4_hba.ue_to_rp = bf_get(
  7774. lpfc_mbx_set_feature_UERP,
  7775. &mboxq->u.mqe.un.set_feature);
  7776. }
  7777. }
  7778. if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
  7779. /* Enable MDS Diagnostics only if the SLI Port supports it */
  7780. lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
  7781. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7782. if (rc != MBX_SUCCESS)
  7783. phba->mds_diags_support = 0;
  7784. }
  7785. /*
  7786. * Discover the port's supported feature set and match it against the
7787. * host's requests.
  7788. */
  7789. lpfc_request_features(phba, mboxq);
  7790. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7791. if (unlikely(rc)) {
  7792. rc = -EIO;
  7793. goto out_free_mbox;
  7794. }
  7795. /* Disable VMID if app header is not supported */
  7796. if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
  7797. &mqe->un.req_ftrs))) {
  7798. bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
  7799. phba->cfg_vmid_app_header = 0;
  7800. lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
  7801. "1242 vmid feature not supported\n");
  7802. }
  7803. /*
  7804. * The port must support FCP initiator mode as this is the
  7805. * only mode running in the host.
  7806. */
  7807. if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
  7808. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  7809. "0378 No support for fcpi mode.\n");
  7810. ftr_rsp++;
  7811. }
  7812. /* Performance Hints are ONLY for FCoE */
  7813. if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
  7814. if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
  7815. phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
  7816. else
  7817. phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
  7818. }
  7819. /*
  7820. * If the port cannot support the host's requested features
  7821. * then turn off the global config parameters to disable the
  7822. * feature in the driver. This is not a fatal error.
  7823. */
  7824. if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
  7825. if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
  7826. phba->cfg_enable_bg = 0;
  7827. phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
  7828. ftr_rsp++;
  7829. }
  7830. }
  7831. if (phba->max_vpi && phba->cfg_enable_npiv &&
  7832. !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
  7833. ftr_rsp++;
  7834. if (ftr_rsp) {
  7835. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  7836. "0379 Feature Mismatch Data: x%08x %08x "
  7837. "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
  7838. mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
  7839. phba->cfg_enable_npiv, phba->max_vpi);
  7840. if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
  7841. phba->cfg_enable_bg = 0;
  7842. if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
  7843. phba->cfg_enable_npiv = 0;
  7844. }
  7845. /* These SLI3 features are assumed in SLI4 */
  7846. spin_lock_irq(&phba->hbalock);
  7847. phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
  7848. spin_unlock_irq(&phba->hbalock);
  7849. /* Always try to enable dual dump feature if we can */
  7850. lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
  7851. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7852. dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
  7853. if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
  7854. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  7855. "6448 Dual Dump is enabled\n");
  7856. else
  7857. lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
  7858. "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
  7859. "rc:x%x dd:x%x\n",
  7860. bf_get(lpfc_mqe_command, &mboxq->u.mqe),
  7861. lpfc_sli_config_mbox_subsys_get(
  7862. phba, mboxq),
  7863. lpfc_sli_config_mbox_opcode_get(
  7864. phba, mboxq),
  7865. rc, dd);
  7866. /*
7867. * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
7868. * calls depend on these resources to complete port setup.
  7869. */
  7870. rc = lpfc_sli4_alloc_resource_identifiers(phba);
  7871. if (rc) {
  7872. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7873. "2920 Failed to alloc Resource IDs "
  7874. "rc = x%x\n", rc);
  7875. goto out_free_mbox;
  7876. }
  7877. lpfc_set_host_data(phba, mboxq);
  7878. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7879. if (rc) {
  7880. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  7881. "2134 Failed to set host os driver version %x",
  7882. rc);
  7883. }
  7884. /* Read the port's service parameters. */
  7885. rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
  7886. if (rc) {
  7887. phba->link_state = LPFC_HBA_ERROR;
  7888. rc = -ENOMEM;
  7889. goto out_free_mbox;
  7890. }
  7891. mboxq->vport = vport;
  7892. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  7893. mp = mboxq->ctx_buf;
  7894. if (rc == MBX_SUCCESS) {
  7895. memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
  7896. rc = 0;
  7897. }
  7898. /*
  7899. * This memory was allocated by the lpfc_read_sparam routine but is
  7900. * no longer needed. It is released and ctx_buf NULLed to prevent
  7901. * unintended pointer access as the mbox is reused.
  7902. */
  7903. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  7904. kfree(mp);
  7905. mboxq->ctx_buf = NULL;
  7906. if (unlikely(rc)) {
  7907. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7908. "0382 READ_SPARAM command failed "
  7909. "status %d, mbxStatus x%x\n",
  7910. rc, bf_get(lpfc_mqe_status, mqe));
  7911. phba->link_state = LPFC_HBA_ERROR;
  7912. rc = -EIO;
  7913. goto out_free_mbox;
  7914. }
  7915. lpfc_update_vport_wwn(vport);
  7916. /* Update the fc_host data structures with new wwn. */
  7917. fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
  7918. fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
  7919. /* Create all the SLI4 queues */
  7920. rc = lpfc_sli4_queue_create(phba);
  7921. if (rc) {
  7922. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7923. "3089 Failed to allocate queues\n");
  7924. rc = -ENODEV;
  7925. goto out_free_mbox;
  7926. }
  7927. /* Set up all the queues to the device */
  7928. rc = lpfc_sli4_queue_setup(phba);
  7929. if (unlikely(rc)) {
  7930. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7931. "0381 Error %d during queue setup.\n", rc);
  7932. goto out_stop_timers;
  7933. }
  7934. /* Initialize the driver internal SLI layer lists. */
  7935. lpfc_sli4_setup(phba);
  7936. lpfc_sli4_queue_init(phba);
  7937. /* update host els xri-sgl sizes and mappings */
  7938. rc = lpfc_sli4_els_sgl_update(phba);
  7939. if (unlikely(rc)) {
  7940. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7941. "1400 Failed to update xri-sgl size and "
  7942. "mapping: %d\n", rc);
  7943. goto out_destroy_queue;
  7944. }
  7945. /* register the els sgl pool to the port */
  7946. rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
  7947. phba->sli4_hba.els_xri_cnt);
  7948. if (unlikely(rc < 0)) {
  7949. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7950. "0582 Error %d during els sgl post "
  7951. "operation\n", rc);
  7952. rc = -ENODEV;
  7953. goto out_destroy_queue;
  7954. }
  7955. phba->sli4_hba.els_xri_cnt = rc;
  7956. if (phba->nvmet_support) {
  7957. /* update host nvmet xri-sgl sizes and mappings */
  7958. rc = lpfc_sli4_nvmet_sgl_update(phba);
  7959. if (unlikely(rc)) {
  7960. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7961. "6308 Failed to update nvmet-sgl size "
  7962. "and mapping: %d\n", rc);
  7963. goto out_destroy_queue;
  7964. }
  7965. /* register the nvmet sgl pool to the port */
  7966. rc = lpfc_sli4_repost_sgl_list(
  7967. phba,
  7968. &phba->sli4_hba.lpfc_nvmet_sgl_list,
  7969. phba->sli4_hba.nvmet_xri_cnt);
  7970. if (unlikely(rc < 0)) {
  7971. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7972. "3117 Error %d during nvmet "
  7973. "sgl post\n", rc);
  7974. rc = -ENODEV;
  7975. goto out_destroy_queue;
  7976. }
  7977. phba->sli4_hba.nvmet_xri_cnt = rc;
  7978. /* We allocate an iocbq for every receive context SGL.
  7979. * The additional allocation is for abort and ls handling.
  7980. */
  7981. cnt = phba->sli4_hba.nvmet_xri_cnt +
  7982. phba->sli4_hba.max_cfg_param.max_xri;
  7983. } else {
  7984. /* update host common xri-sgl sizes and mappings */
  7985. rc = lpfc_sli4_io_sgl_update(phba);
  7986. if (unlikely(rc)) {
  7987. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7988. "6082 Failed to update nvme-sgl size "
  7989. "and mapping: %d\n", rc);
  7990. goto out_destroy_queue;
  7991. }
  7992. /* register the allocated common sgl pool to the port */
  7993. rc = lpfc_sli4_repost_io_sgl_list(phba);
  7994. if (unlikely(rc)) {
  7995. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  7996. "6116 Error %d during nvme sgl post "
  7997. "operation\n", rc);
  7998. /* Some NVME buffers were moved to abort nvme list */
  7999. /* A pci function reset will repost them */
  8000. rc = -ENODEV;
  8001. goto out_destroy_queue;
  8002. }
  8003. /* Each lpfc_io_buf job structure has an iocbq element.
  8004. * This cnt provides for abort, els, ct and ls requests.
  8005. */
  8006. cnt = phba->sli4_hba.max_cfg_param.max_xri;
  8007. }
  8008. if (!phba->sli.iocbq_lookup) {
  8009. /* Initialize and populate the iocb list per host */
  8010. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  8011. "2821 initialize iocb list with %d entries\n",
  8012. cnt);
  8013. rc = lpfc_init_iocb_list(phba, cnt);
  8014. if (rc) {
  8015. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8016. "1413 Failed to init iocb list.\n");
  8017. goto out_destroy_queue;
  8018. }
  8019. }
  8020. if (phba->nvmet_support)
  8021. lpfc_nvmet_create_targetport(phba);
  8022. if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
  8023. /* Post initial buffers to all RQs created */
  8024. for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
  8025. rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
  8026. INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
  8027. rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
  8028. rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
  8029. rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
  8030. rqbp->buffer_count = 0;
  8031. lpfc_post_rq_buffer(
  8032. phba, phba->sli4_hba.nvmet_mrq_hdr[i],
  8033. phba->sli4_hba.nvmet_mrq_data[i],
  8034. phba->cfg_nvmet_mrq_post, i);
  8035. }
  8036. }
  8037. /* Post the rpi header region to the device. */
  8038. rc = lpfc_sli4_post_all_rpi_hdrs(phba);
  8039. if (unlikely(rc)) {
  8040. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8041. "0393 Error %d during rpi post operation\n",
  8042. rc);
  8043. rc = -ENODEV;
  8044. goto out_free_iocblist;
  8045. }
  8046. lpfc_sli4_node_prep(phba);
  8047. if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
  8048. if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
  8049. /*
  8050. * The FC Port needs to register FCFI (index 0)
  8051. */
  8052. lpfc_reg_fcfi(phba, mboxq);
  8053. mboxq->vport = phba->pport;
  8054. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  8055. if (rc != MBX_SUCCESS)
  8056. goto out_unset_queue;
  8057. rc = 0;
  8058. phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
  8059. &mboxq->u.mqe.un.reg_fcfi);
  8060. } else {
  8061. /* We are a NVME Target mode with MRQ > 1 */
  8062. /* First register the FCFI */
  8063. lpfc_reg_fcfi_mrq(phba, mboxq, 0);
  8064. mboxq->vport = phba->pport;
  8065. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  8066. if (rc != MBX_SUCCESS)
  8067. goto out_unset_queue;
  8068. rc = 0;
  8069. phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
  8070. &mboxq->u.mqe.un.reg_fcfi_mrq);
  8071. /* Next register the MRQs */
  8072. lpfc_reg_fcfi_mrq(phba, mboxq, 1);
  8073. mboxq->vport = phba->pport;
  8074. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  8075. if (rc != MBX_SUCCESS)
  8076. goto out_unset_queue;
  8077. rc = 0;
  8078. }
  8079. /* Check if the port is configured to be disabled */
  8080. lpfc_sli_read_link_ste(phba);
  8081. }
  8082. /* Don't post more new bufs if repost already recovered
  8083. * the nvme sgls.
  8084. */
  8085. if (phba->nvmet_support == 0) {
  8086. if (phba->sli4_hba.io_xri_cnt == 0) {
  8087. len = lpfc_new_io_buf(
  8088. phba, phba->sli4_hba.io_xri_max);
  8089. if (len == 0) {
  8090. rc = -ENOMEM;
  8091. goto out_unset_queue;
  8092. }
  8093. if (phba->cfg_xri_rebalancing)
  8094. lpfc_create_multixri_pools(phba);
  8095. }
  8096. } else {
  8097. phba->cfg_xri_rebalancing = 0;
  8098. }
  8099. /* Allow asynchronous mailbox command to go through */
  8100. spin_lock_irq(&phba->hbalock);
  8101. phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
  8102. spin_unlock_irq(&phba->hbalock);
  8103. /* Post receive buffers to the device */
  8104. lpfc_sli4_rb_setup(phba);
  8105. /* Reset HBA FCF states after HBA reset */
  8106. phba->fcf.fcf_flag = 0;
  8107. phba->fcf.current_rec.flag = 0;
  8108. /* Start the ELS watchdog timer */
  8109. mod_timer(&vport->els_tmofunc,
  8110. jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
  8111. /* Start heart beat timer */
  8112. mod_timer(&phba->hb_tmofunc,
  8113. jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
  8114. clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
  8115. clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
  8116. phba->last_completion_time = jiffies;
  8117. /* start eq_delay heartbeat */
  8118. if (phba->cfg_auto_imax)
  8119. queue_delayed_work(phba->wq, &phba->eq_delay_work,
  8120. msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
  8121. /* start per phba idle_stat_delay heartbeat */
  8122. lpfc_init_idle_stat_hb(phba);
  8123. /* Start error attention (ERATT) polling timer */
  8124. mod_timer(&phba->eratt_poll,
  8125. jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
  8126. /*
  8127. * The port is ready, set the host's link state to LINK_DOWN
  8128. * in preparation for link interrupts.
  8129. */
  8130. spin_lock_irq(&phba->hbalock);
  8131. phba->link_state = LPFC_LINK_DOWN;
  8132. /* Check if physical ports are trunked */
  8133. if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
  8134. phba->trunk_link.link0.state = LPFC_LINK_DOWN;
  8135. if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
  8136. phba->trunk_link.link1.state = LPFC_LINK_DOWN;
  8137. if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
  8138. phba->trunk_link.link2.state = LPFC_LINK_DOWN;
  8139. if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
  8140. phba->trunk_link.link3.state = LPFC_LINK_DOWN;
  8141. spin_unlock_irq(&phba->hbalock);
  8142. /* Arm the CQs and then EQs on device */
  8143. lpfc_sli4_arm_cqeq_intr(phba);
  8144. /* Indicate device interrupt mode */
  8145. phba->sli4_hba.intr_enable = 1;
  8146. /* Setup CMF after HBA is initialized */
  8147. lpfc_cmf_setup(phba);
  8148. if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
  8149. test_bit(LINK_DISABLED, &phba->hba_flag)) {
  8150. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8151. "3103 Adapter Link is disabled.\n");
  8152. lpfc_down_link(phba, mboxq);
  8153. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  8154. if (rc != MBX_SUCCESS) {
  8155. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8156. "3104 Adapter failed to issue "
  8157. "DOWN_LINK mbox cmd, rc:x%x\n", rc);
  8158. goto out_io_buff_free;
  8159. }
  8160. } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
  8161. /* don't perform init_link on SLI4 FC port loopback test */
  8162. if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
  8163. rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
  8164. if (rc)
  8165. goto out_io_buff_free;
  8166. }
  8167. }
  8168. mempool_free(mboxq, phba->mbox_mem_pool);
  8169. /* Enable RAS FW log support */
  8170. lpfc_sli4_ras_setup(phba);
  8171. set_bit(HBA_SETUP, &phba->hba_flag);
  8172. return rc;
  8173. out_io_buff_free:
  8174. /* Free allocated IO Buffers */
  8175. lpfc_io_free(phba);
  8176. out_unset_queue:
  8177. /* Unset all the queues set up in this routine when error out */
  8178. lpfc_sli4_queue_unset(phba);
  8179. out_free_iocblist:
  8180. lpfc_free_iocb_list(phba);
  8181. out_destroy_queue:
  8182. lpfc_sli4_queue_destroy(phba);
  8183. out_stop_timers:
  8184. lpfc_stop_hba_timers(phba);
  8185. out_free_mbox:
  8186. mempool_free(mboxq, phba->mbox_mem_pool);
  8187. return rc;
  8188. }
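/*
 * Editor's note (generic pattern, names invented): the error exits above
 * (out_io_buff_free, out_unset_queue, ... out_free_mbox) form the usual
 * kernel "unwind ladder": each acquired resource adds a label, and a
 * failure jumps to the label that releases everything acquired so far, in
 * reverse order.  The shape of the pattern in miniature:
 */
#include <stdlib.h>

struct demo {
	void *a, *b, *c;
};

static int demo_setup(struct demo *d)
{
	d->a = malloc(16);
	if (!d->a)
		return -1;
	d->b = malloc(16);
	if (!d->b)
		goto out_free_a;
	d->c = malloc(16);
	if (!d->c)
		goto out_free_b;
	return 0;			/* success: *d keeps all three */

out_free_b:
	free(d->b);
out_free_a:
	free(d->a);
	return -1;
}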
  8189. /**
  8190. * lpfc_mbox_timeout - Timeout call back function for mbox timer
  8191. * @t: Context to fetch pointer to hba structure from.
  8192. *
  8193. * This is the callback function for mailbox timer. The mailbox
  8194. * timer is armed when a new mailbox command is issued and the timer
8195. * is deleted when the mailbox completes. The function is called by
8196. * the kernel timer code when a mailbox does not complete within the
8197. * expected time. This function wakes up the worker thread to
  8198. * process the mailbox timeout and returns. All the processing is
  8199. * done by the worker thread function lpfc_mbox_timeout_handler.
  8200. **/
  8201. void
  8202. lpfc_mbox_timeout(struct timer_list *t)
  8203. {
  8204. struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
  8205. unsigned long iflag;
  8206. uint32_t tmo_posted;
  8207. spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
  8208. tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
  8209. if (!tmo_posted)
  8210. phba->pport->work_port_events |= WORKER_MBOX_TMO;
  8211. spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
  8212. if (!tmo_posted)
  8213. lpfc_worker_wake_up(phba);
  8214. return;
  8215. }
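/*
 * Editor's sketch (assumed pairing, not lifted from this file): from_timer()
 * above only works because the timer_list is embedded in struct lpfc_sli,
 * which is itself embedded in struct lpfc_hba, and because the timer was
 * initialized elsewhere in the driver with something like
 * timer_setup(&phba->sli.mbox_tmo, lpfc_mbox_timeout, 0).  A minimal
 * illustration of that container-of pattern, with invented names:
 */
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct demo_ctx {
	struct timer_list tmo;
	int id;
};

static void demo_timeout(struct timer_list *t)
{
	/* recover the containing structure from the timer_list pointer */
	struct demo_ctx *ctx = from_timer(ctx, t, tmo);

	pr_info("demo timer %d fired\n", ctx->id);
}

static void demo_arm(struct demo_ctx *ctx, unsigned int msecs)
{
	timer_setup(&ctx->tmo, demo_timeout, 0);
	mod_timer(&ctx->tmo, jiffies + msecs_to_jiffies(msecs));
}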
  8216. /**
  8217. * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
  8218. * are pending
  8219. * @phba: Pointer to HBA context object.
  8220. *
  8221. * This function checks if any mailbox completions are present on the mailbox
  8222. * completion queue.
  8223. **/
  8224. static bool
  8225. lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
  8226. {
  8227. uint32_t idx;
  8228. struct lpfc_queue *mcq;
  8229. struct lpfc_mcqe *mcqe;
  8230. bool pending_completions = false;
  8231. uint8_t qe_valid;
  8232. if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
  8233. return false;
  8234. /* Check for completions on mailbox completion queue */
  8235. mcq = phba->sli4_hba.mbx_cq;
  8236. idx = mcq->hba_index;
  8237. qe_valid = mcq->qe_valid;
  8238. while (bf_get_le32(lpfc_cqe_valid,
  8239. (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
  8240. mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
  8241. if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
  8242. (!bf_get_le32(lpfc_trailer_async, mcqe))) {
  8243. pending_completions = true;
  8244. break;
  8245. }
  8246. idx = (idx + 1) % mcq->entry_count;
  8247. if (mcq->hba_index == idx)
  8248. break;
  8249. /* if the index wrapped around, toggle the valid bit */
  8250. if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
  8251. qe_valid = (qe_valid) ? 0 : 1;
  8252. }
  8253. return pending_completions;
  8254. }
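/*
 * Editor's note (self-contained illustration, names invented): the loop
 * above walks a circular completion queue whose entries carry a phase
 * ("valid") bit.  The consumer remembers which phase it expects; entries
 * still carrying the old phase are stale.  When the index wraps to 0 the
 * expected phase flips, so entries left over from the previous lap are
 * never mistaken for new completions.  The same bookkeeping in plain C:
 */
#include <stdio.h>

#define QDEPTH 4

struct fake_cqe {
	unsigned int valid;	/* phase bit written by the producer */
	unsigned int payload;
};

static void scan(const struct fake_cqe *q, unsigned int start,
		 unsigned int phase)
{
	unsigned int idx = start;

	while (q[idx].valid == phase) {
		printf("entry %u payload %u\n", idx, q[idx].payload);
		idx = (idx + 1) % QDEPTH;
		if (idx == start)
			break;		/* consumed a full lap */
		if (idx == 0)
			phase = !phase;	/* wrapped: expected phase flips */
	}
}

int main(void)
{
	struct fake_cqe q[QDEPTH] = { {1, 10}, {1, 11}, {0, 0}, {0, 0} };

	scan(q, 0, 1);	/* prints entries 0 and 1, stops at stale entry 2 */
	return 0;
}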
  8255. /**
  8256. * lpfc_sli4_process_missed_mbox_completions - process mbox completions
  8257. * that were missed.
  8258. * @phba: Pointer to HBA context object.
  8259. *
  8260. * For sli4, it is possible to miss an interrupt. As such mbox completions
8261. * may be missed, causing erroneous mailbox timeouts to occur. This function
  8262. * checks to see if mbox completions are on the mailbox completion queue
  8263. * and will process all the completions associated with the eq for the
  8264. * mailbox completion queue.
  8265. **/
  8266. static bool
  8267. lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
  8268. {
  8269. struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
  8270. uint32_t eqidx;
  8271. struct lpfc_queue *fpeq = NULL;
  8272. struct lpfc_queue *eq;
  8273. bool mbox_pending;
  8274. if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
  8275. return false;
  8276. /* Find the EQ associated with the mbox CQ */
  8277. if (sli4_hba->hdwq) {
  8278. for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
  8279. eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
  8280. if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
  8281. fpeq = eq;
  8282. break;
  8283. }
  8284. }
  8285. }
  8286. if (!fpeq)
  8287. return false;
  8288. /* Turn off interrupts from this EQ */
  8289. sli4_hba->sli4_eq_clr_intr(fpeq);
  8290. /* Check to see if a mbox completion is pending */
  8291. mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
  8292. /*
  8293. * If a mbox completion is pending, process all the events on EQ
  8294. * associated with the mbox completion queue (this could include
  8295. * mailbox commands, async events, els commands, receive queue data
  8296. * and fcp commands)
  8297. */
  8298. if (mbox_pending)
  8299. /* process and rearm the EQ */
  8300. lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
  8301. LPFC_QUEUE_WORK);
  8302. else
  8303. /* Always clear and re-arm the EQ */
  8304. sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
  8305. return mbox_pending;
  8306. }
  8307. /**
  8308. * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
  8309. * @phba: Pointer to HBA context object.
  8310. *
  8311. * This function is called from worker thread when a mailbox command times out.
  8312. * The caller is not required to hold any locks. This function will reset the
  8313. * HBA and recover all the pending commands.
  8314. **/
  8315. void
  8316. lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
  8317. {
  8318. LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
  8319. MAILBOX_t *mb = NULL;
  8320. struct lpfc_sli *psli = &phba->sli;
  8321. /* If the mailbox completed, process the completion */
  8322. lpfc_sli4_process_missed_mbox_completions(phba);
  8323. if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
  8324. return;
  8325. if (pmbox != NULL)
  8326. mb = &pmbox->u.mb;
  8327. /* Check the pmbox pointer first. There is a race condition
  8328. * between the mbox timeout handler getting executed in the
  8329. * worklist and the mailbox actually completing. When this
  8330. * race condition occurs, the mbox_active will be NULL.
  8331. */
  8332. spin_lock_irq(&phba->hbalock);
  8333. if (pmbox == NULL) {
  8334. lpfc_printf_log(phba, KERN_WARNING,
  8335. LOG_MBOX | LOG_SLI,
  8336. "0353 Active Mailbox cleared - mailbox timeout "
  8337. "exiting\n");
  8338. spin_unlock_irq(&phba->hbalock);
  8339. return;
  8340. }
  8341. /* Mbox cmd <mbxCommand> timeout */
  8342. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8343. "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
  8344. mb->mbxCommand,
  8345. phba->pport->port_state,
  8346. phba->sli.sli_flag,
  8347. phba->sli.mbox_active);
  8348. spin_unlock_irq(&phba->hbalock);
  8349. /* Setting state unknown so lpfc_sli_abort_iocb_ring
  8350. * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
  8351. * it to fail all outstanding SCSI IO.
  8352. */
  8353. set_bit(MBX_TMO_ERR, &phba->bit_flags);
  8354. spin_lock_irq(&phba->pport->work_port_lock);
  8355. phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
  8356. spin_unlock_irq(&phba->pport->work_port_lock);
  8357. spin_lock_irq(&phba->hbalock);
  8358. phba->link_state = LPFC_LINK_UNKNOWN;
  8359. psli->sli_flag &= ~LPFC_SLI_ACTIVE;
  8360. spin_unlock_irq(&phba->hbalock);
  8361. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8362. "0345 Resetting board due to mailbox timeout\n");
  8363. /* Reset the HBA device */
  8364. lpfc_reset_hba(phba);
  8365. }
  8366. /**
  8367. * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
  8368. * @phba: Pointer to HBA context object.
  8369. * @pmbox: Pointer to mailbox object.
  8370. * @flag: Flag indicating how the mailbox need to be processed.
  8371. *
  8372. * This function is called by discovery code and HBA management code
  8373. * to submit a mailbox command to firmware with SLI-3 interface spec. This
  8374. * function gets the hbalock to protect the data structures.
  8375. * The mailbox command can be submitted in polling mode, in which case
  8376. * this function will wait in a polling loop for the completion of the
  8377. * mailbox.
  8378. * If the mailbox is submitted in no_wait mode (not polling) the
8379. * function will submit the command and return immediately without waiting
8380. * for the mailbox completion. The no_wait mode is supported only when the
8381. * HBA is in SLI2/SLI3 mode with interrupts enabled.
  8382. * The SLI interface allows only one mailbox pending at a time. If the
  8383. * mailbox is issued in polling mode and there is already a mailbox
  8384. * pending, then the function will return an error. If the mailbox is issued
  8385. * in NO_WAIT mode and there is a mailbox pending already, the function
8386. * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
8387. * The sli layer owns the mailbox object until the completion of the mailbox
8388. * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
  8389. * return codes the caller owns the mailbox command after the return of
  8390. * the function.
  8391. **/
  8392. static int
  8393. lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
  8394. uint32_t flag)
  8395. {
  8396. MAILBOX_t *mbx;
  8397. struct lpfc_sli *psli = &phba->sli;
  8398. uint32_t status, evtctr;
  8399. uint32_t ha_copy, hc_copy;
  8400. int i;
  8401. unsigned long timeout;
  8402. unsigned long drvr_flag = 0;
  8403. uint32_t word0, ldata;
  8404. void __iomem *to_slim;
  8405. int processing_queue = 0;
  8406. spin_lock_irqsave(&phba->hbalock, drvr_flag);
  8407. if (!pmbox) {
  8408. phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  8409. /* processing mbox queue from intr_handler */
  8410. if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
  8411. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8412. return MBX_SUCCESS;
  8413. }
  8414. processing_queue = 1;
  8415. pmbox = lpfc_mbox_get(phba);
  8416. if (!pmbox) {
  8417. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8418. return MBX_SUCCESS;
  8419. }
  8420. }
  8421. if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
  8422. pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
  8423. if(!pmbox->vport) {
  8424. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8425. lpfc_printf_log(phba, KERN_ERR,
  8426. LOG_MBOX | LOG_VPORT,
  8427. "1806 Mbox x%x failed. No vport\n",
  8428. pmbox->u.mb.mbxCommand);
  8429. dump_stack();
  8430. goto out_not_finished;
  8431. }
  8432. }
  8433. /* If the PCI channel is in offline state, do not post mbox. */
  8434. if (unlikely(pci_channel_offline(phba->pcidev))) {
  8435. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8436. goto out_not_finished;
  8437. }
  8438. /* If HBA has a deferred error attention, fail the iocb. */
  8439. if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
  8440. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8441. goto out_not_finished;
  8442. }
  8443. psli = &phba->sli;
  8444. mbx = &pmbox->u.mb;
  8445. status = MBX_SUCCESS;
  8446. if (phba->link_state == LPFC_HBA_ERROR) {
  8447. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8448. /* Mbox command <mbxCommand> cannot issue */
  8449. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8450. "(%d):0311 Mailbox command x%x cannot "
  8451. "issue Data: x%x x%x\n",
  8452. pmbox->vport ? pmbox->vport->vpi : 0,
  8453. pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
  8454. goto out_not_finished;
  8455. }
  8456. if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
  8457. if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
  8458. !(hc_copy & HC_MBINT_ENA)) {
  8459. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8460. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8461. "(%d):2528 Mailbox command x%x cannot "
  8462. "issue Data: x%x x%x\n",
  8463. pmbox->vport ? pmbox->vport->vpi : 0,
  8464. pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
  8465. goto out_not_finished;
  8466. }
  8467. }
  8468. if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
  8469. /* Polling for a mbox command when another one is already active
  8470. * is not allowed in SLI. Also, the driver must have established
  8471. * SLI2 mode to queue and process multiple mbox commands.
  8472. */
  8473. if (flag & MBX_POLL) {
  8474. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8475. /* Mbox command <mbxCommand> cannot issue */
  8476. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8477. "(%d):2529 Mailbox command x%x "
  8478. "cannot issue Data: x%x x%x\n",
  8479. pmbox->vport ? pmbox->vport->vpi : 0,
  8480. pmbox->u.mb.mbxCommand,
  8481. psli->sli_flag, flag);
  8482. goto out_not_finished;
  8483. }
  8484. if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
  8485. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8486. /* Mbox command <mbxCommand> cannot issue */
  8487. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8488. "(%d):2530 Mailbox command x%x "
  8489. "cannot issue Data: x%x x%x\n",
  8490. pmbox->vport ? pmbox->vport->vpi : 0,
  8491. pmbox->u.mb.mbxCommand,
  8492. psli->sli_flag, flag);
  8493. goto out_not_finished;
  8494. }
  8495. /* Another mailbox command is still being processed, queue this
  8496. * command to be processed later.
  8497. */
  8498. lpfc_mbox_put(phba, pmbox);
  8499. /* Mbox cmd issue - BUSY */
  8500. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  8501. "(%d):0308 Mbox cmd issue - BUSY Data: "
  8502. "x%x x%x x%x x%x\n",
  8503. pmbox->vport ? pmbox->vport->vpi : 0xffffff,
  8504. mbx->mbxCommand,
  8505. phba->pport ? phba->pport->port_state : 0xff,
  8506. psli->sli_flag, flag);
  8507. psli->slistat.mbox_busy++;
  8508. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8509. if (pmbox->vport) {
  8510. lpfc_debugfs_disc_trc(pmbox->vport,
  8511. LPFC_DISC_TRC_MBOX_VPORT,
  8512. "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
  8513. (uint32_t)mbx->mbxCommand,
  8514. mbx->un.varWords[0], mbx->un.varWords[1]);
  8515. }
  8516. else {
  8517. lpfc_debugfs_disc_trc(phba->pport,
  8518. LPFC_DISC_TRC_MBOX,
  8519. "MBOX Bsy: cmd:x%x mb:x%x x%x",
  8520. (uint32_t)mbx->mbxCommand,
  8521. mbx->un.varWords[0], mbx->un.varWords[1]);
  8522. }
  8523. return MBX_BUSY;
  8524. }
  8525. psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
  8526. /* If we are not polling, we MUST be in SLI2 mode */
  8527. if (flag != MBX_POLL) {
  8528. if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
  8529. (mbx->mbxCommand != MBX_KILL_BOARD)) {
  8530. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  8531. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8532. /* Mbox command <mbxCommand> cannot issue */
  8533. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8534. "(%d):2531 Mailbox command x%x "
  8535. "cannot issue Data: x%x x%x\n",
  8536. pmbox->vport ? pmbox->vport->vpi : 0,
  8537. pmbox->u.mb.mbxCommand,
  8538. psli->sli_flag, flag);
  8539. goto out_not_finished;
  8540. }
  8541. /* timeout active mbox command */
  8542. timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
  8543. 1000);
  8544. mod_timer(&psli->mbox_tmo, jiffies + timeout);
  8545. }
  8546. /* Mailbox cmd <cmd> issue */
  8547. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  8548. "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
  8549. "x%x\n",
  8550. pmbox->vport ? pmbox->vport->vpi : 0,
  8551. mbx->mbxCommand,
  8552. phba->pport ? phba->pport->port_state : 0xff,
  8553. psli->sli_flag, flag);
  8554. if (mbx->mbxCommand != MBX_HEARTBEAT) {
  8555. if (pmbox->vport) {
  8556. lpfc_debugfs_disc_trc(pmbox->vport,
  8557. LPFC_DISC_TRC_MBOX_VPORT,
  8558. "MBOX Send vport: cmd:x%x mb:x%x x%x",
  8559. (uint32_t)mbx->mbxCommand,
  8560. mbx->un.varWords[0], mbx->un.varWords[1]);
  8561. }
  8562. else {
  8563. lpfc_debugfs_disc_trc(phba->pport,
  8564. LPFC_DISC_TRC_MBOX,
  8565. "MBOX Send: cmd:x%x mb:x%x x%x",
  8566. (uint32_t)mbx->mbxCommand,
  8567. mbx->un.varWords[0], mbx->un.varWords[1]);
  8568. }
  8569. }
  8570. psli->slistat.mbox_cmd++;
  8571. evtctr = psli->slistat.mbox_event;
  8572. /* next set own bit for the adapter and copy over command word */
  8573. mbx->mbxOwner = OWN_CHIP;
  8574. if (psli->sli_flag & LPFC_SLI_ACTIVE) {
  8575. /* Populate mbox extension offset word. */
  8576. if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
  8577. *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
  8578. = (uint8_t *)phba->mbox_ext
  8579. - (uint8_t *)phba->mbox;
  8580. }
  8581. /* Copy the mailbox extension data */
  8582. if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
  8583. lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
  8584. (uint8_t *)phba->mbox_ext,
  8585. pmbox->in_ext_byte_len);
  8586. }
  8587. /* Copy command data to host SLIM area */
  8588. lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
  8589. } else {
  8590. /* Populate mbox extension offset word. */
  8591. if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
  8592. *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
  8593. = MAILBOX_HBA_EXT_OFFSET;
  8594. /* Copy the mailbox extension data */
  8595. if (pmbox->in_ext_byte_len && pmbox->ext_buf)
  8596. lpfc_memcpy_to_slim(phba->MBslimaddr +
  8597. MAILBOX_HBA_EXT_OFFSET,
  8598. pmbox->ext_buf, pmbox->in_ext_byte_len);
  8599. if (mbx->mbxCommand == MBX_CONFIG_PORT)
  8600. /* copy command data into host mbox for cmpl */
  8601. lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
  8602. MAILBOX_CMD_SIZE);
  8603. /* First copy mbox command data to HBA SLIM, skip past first
  8604. word */
  8605. to_slim = phba->MBslimaddr + sizeof (uint32_t);
  8606. lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
  8607. MAILBOX_CMD_SIZE - sizeof (uint32_t));
  8608. /* Next copy over first word, with mbxOwner set */
  8609. ldata = *((uint32_t *)mbx);
  8610. to_slim = phba->MBslimaddr;
  8611. writel(ldata, to_slim);
  8612. readl(to_slim); /* flush */
  8613. if (mbx->mbxCommand == MBX_CONFIG_PORT)
  8614. /* switch over to host mailbox */
  8615. psli->sli_flag |= LPFC_SLI_ACTIVE;
  8616. }
  8617. wmb();
  8618. switch (flag) {
  8619. case MBX_NOWAIT:
  8620. /* Set up reference to mailbox command */
  8621. psli->mbox_active = pmbox;
  8622. /* Interrupt board to do it */
  8623. writel(CA_MBATT, phba->CAregaddr);
  8624. readl(phba->CAregaddr); /* flush */
  8625. /* Don't wait for it to finish, just return */
  8626. break;
  8627. case MBX_POLL:
  8628. /* Set up null reference to mailbox command */
  8629. psli->mbox_active = NULL;
  8630. /* Interrupt board to do it */
  8631. writel(CA_MBATT, phba->CAregaddr);
  8632. readl(phba->CAregaddr); /* flush */
  8633. if (psli->sli_flag & LPFC_SLI_ACTIVE) {
  8634. /* First read mbox status word */
  8635. word0 = *((uint32_t *)phba->mbox);
  8636. word0 = le32_to_cpu(word0);
  8637. } else {
  8638. /* First read mbox status word */
  8639. if (lpfc_readl(phba->MBslimaddr, &word0)) {
  8640. spin_unlock_irqrestore(&phba->hbalock,
  8641. drvr_flag);
  8642. goto out_not_finished;
  8643. }
  8644. }
  8645. /* Read the HBA Host Attention Register */
  8646. if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
  8647. spin_unlock_irqrestore(&phba->hbalock,
  8648. drvr_flag);
  8649. goto out_not_finished;
  8650. }
  8651. timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
  8652. 1000) + jiffies;
  8653. i = 0;
  8654. /* Wait for command to complete */
  8655. while (((word0 & OWN_CHIP) == OWN_CHIP) ||
  8656. (!(ha_copy & HA_MBATT) &&
  8657. (phba->link_state > LPFC_WARM_START))) {
  8658. if (time_after(jiffies, timeout)) {
  8659. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  8660. spin_unlock_irqrestore(&phba->hbalock,
  8661. drvr_flag);
  8662. goto out_not_finished;
  8663. }
  8664. /* Check if we took a mbox interrupt while we were
  8665. polling */
  8666. if (((word0 & OWN_CHIP) != OWN_CHIP)
  8667. && (evtctr != psli->slistat.mbox_event))
  8668. break;
  8669. if (i++ > 10) {
  8670. spin_unlock_irqrestore(&phba->hbalock,
  8671. drvr_flag);
  8672. msleep(1);
  8673. spin_lock_irqsave(&phba->hbalock, drvr_flag);
  8674. }
  8675. if (psli->sli_flag & LPFC_SLI_ACTIVE) {
  8676. /* First copy command data */
  8677. word0 = *((uint32_t *)phba->mbox);
  8678. word0 = le32_to_cpu(word0);
  8679. if (mbx->mbxCommand == MBX_CONFIG_PORT) {
  8680. MAILBOX_t *slimmb;
  8681. uint32_t slimword0;
  8682. /* Check real SLIM for any errors */
  8683. slimword0 = readl(phba->MBslimaddr);
  8684. slimmb = (MAILBOX_t *) & slimword0;
  8685. if (((slimword0 & OWN_CHIP) != OWN_CHIP)
  8686. && slimmb->mbxStatus) {
  8687. psli->sli_flag &=
  8688. ~LPFC_SLI_ACTIVE;
  8689. word0 = slimword0;
  8690. }
  8691. }
  8692. } else {
  8693. /* First copy command data */
  8694. word0 = readl(phba->MBslimaddr);
  8695. }
  8696. /* Read the HBA Host Attention Register */
  8697. if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
  8698. spin_unlock_irqrestore(&phba->hbalock,
  8699. drvr_flag);
  8700. goto out_not_finished;
  8701. }
  8702. }
  8703. if (psli->sli_flag & LPFC_SLI_ACTIVE) {
  8704. /* copy results back to user */
  8705. lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
  8706. MAILBOX_CMD_SIZE);
  8707. /* Copy the mailbox extension data */
  8708. if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
  8709. lpfc_sli_pcimem_bcopy(phba->mbox_ext,
  8710. pmbox->ext_buf,
  8711. pmbox->out_ext_byte_len);
  8712. }
  8713. } else {
  8714. /* First copy command data */
  8715. lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
  8716. MAILBOX_CMD_SIZE);
  8717. /* Copy the mailbox extension data */
  8718. if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
  8719. lpfc_memcpy_from_slim(
  8720. pmbox->ext_buf,
  8721. phba->MBslimaddr +
  8722. MAILBOX_HBA_EXT_OFFSET,
  8723. pmbox->out_ext_byte_len);
  8724. }
  8725. }
  8726. writel(HA_MBATT, phba->HAregaddr);
  8727. readl(phba->HAregaddr); /* flush */
  8728. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  8729. status = mbx->mbxStatus;
  8730. }
  8731. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  8732. return status;
  8733. out_not_finished:
  8734. if (processing_queue) {
  8735. pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
  8736. lpfc_mbox_cmpl_put(phba, pmbox);
  8737. }
  8738. return MBX_NOT_FINISHED;
  8739. }
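/*
 * Editor's sketch (condensed from the call sites in this file, e.g.
 * lpfc_set_host_tm() above; demo_poll_mbox is an invented name and error
 * handling is trimmed): a typical polled mailbox round trip.  In MBX_POLL
 * mode the command has either completed or failed by the time the call
 * returns, so callers in this file free the mailbox themselves afterwards.
 */
static int demo_poll_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* ... fill in mboxq->u.mb (or u.mqe) for the desired command ... */

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}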
  8740. /**
  8741. * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
  8742. * @phba: Pointer to HBA context object.
  8743. *
  8744. * The function blocks the posting of SLI4 asynchronous mailbox commands from
8745. * the driver's internal pending mailbox queue. It will then try to wait out any
8746. * outstanding mailbox command before returning.
  8747. *
  8748. * Returns:
  8749. * 0 - the outstanding mailbox command completed; otherwise, the wait for
  8750. * the outstanding mailbox command timed out.
  8751. **/
  8752. static int
  8753. lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
  8754. {
  8755. struct lpfc_sli *psli = &phba->sli;
  8756. LPFC_MBOXQ_t *mboxq;
  8757. int rc = 0;
  8758. unsigned long timeout = 0;
  8759. u32 sli_flag;
  8760. u8 cmd, subsys, opcode;
  8761. /* Mark the asynchronous mailbox command posting as blocked */
  8762. spin_lock_irq(&phba->hbalock);
  8763. psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
  8764. /* Determine how long we might wait for the active mailbox
  8765. * command to be gracefully completed by firmware.
  8766. */
  8767. if (phba->sli.mbox_active)
  8768. timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
  8769. phba->sli.mbox_active) *
  8770. 1000) + jiffies;
  8771. spin_unlock_irq(&phba->hbalock);
  8772. /* Make sure the mailbox is really active */
  8773. if (timeout)
  8774. lpfc_sli4_process_missed_mbox_completions(phba);
  8775. /* Wait for the outstanding mailbox command to complete */
  8776. while (phba->sli.mbox_active) {
  8777. /* Check active mailbox complete status every 2ms */
  8778. msleep(2);
  8779. if (time_after(jiffies, timeout)) {
  8780. /* Timeout, mark the outstanding cmd not complete */
8781. /* Sanity check that sli.mbox_active has not completed or been
8782. * cancelled from another context during the last 2ms sleep,
  8783. * so take hbalock to be sure before logging.
  8784. */
  8785. spin_lock_irq(&phba->hbalock);
  8786. if (phba->sli.mbox_active) {
  8787. mboxq = phba->sli.mbox_active;
  8788. cmd = mboxq->u.mb.mbxCommand;
  8789. subsys = lpfc_sli_config_mbox_subsys_get(phba,
  8790. mboxq);
  8791. opcode = lpfc_sli_config_mbox_opcode_get(phba,
  8792. mboxq);
  8793. sli_flag = psli->sli_flag;
  8794. spin_unlock_irq(&phba->hbalock);
  8795. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8796. "2352 Mailbox command x%x "
  8797. "(x%x/x%x) sli_flag x%x could "
  8798. "not complete\n",
  8799. cmd, subsys, opcode,
  8800. sli_flag);
  8801. } else {
  8802. spin_unlock_irq(&phba->hbalock);
  8803. }
  8804. rc = 1;
  8805. break;
  8806. }
  8807. }
8808. /* Could not cleanly block async mailbox command posting, fail it */
  8809. if (rc) {
  8810. spin_lock_irq(&phba->hbalock);
  8811. psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
  8812. spin_unlock_irq(&phba->hbalock);
  8813. }
  8814. return rc;
  8815. }
  8816. /**
8817. * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
  8818. * @phba: Pointer to HBA context object.
  8819. *
8820. * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8821. * commands from the driver's internal pending mailbox queue. It makes sure
8822. * that there is no outstanding mailbox command before resuming posting
8823. * asynchronous mailbox commands. If, for any reason, there is an outstanding
8824. * mailbox command, it will try to wait it out before resuming asynchronous
  8825. * mailbox command posting.
  8826. **/
  8827. static void
  8828. lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
  8829. {
  8830. struct lpfc_sli *psli = &phba->sli;
  8831. spin_lock_irq(&phba->hbalock);
  8832. if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
  8833. /* Asynchronous mailbox posting is not blocked, do nothing */
  8834. spin_unlock_irq(&phba->hbalock);
  8835. return;
  8836. }
8837. /* The outstanding synchronous mailbox command is guaranteed to be done,
8838. * either successfully or by timeout. After timing out, the outstanding
8839. * mailbox command is always removed, so just unblock posting of async
8840. * mailbox commands and resume.
  8841. */
  8842. psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
  8843. spin_unlock_irq(&phba->hbalock);
  8844. /* wake up worker thread to post asynchronous mailbox command */
  8845. lpfc_worker_wake_up(phba);
  8846. }
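/*
 * Editor's sketch (condensed from lpfc_sli_issue_mbox_s4() further below;
 * demo_sync_mbox is an invented name): the block/unblock pair above is used
 * to bracket a synchronous bootstrap-mailbox operation while interrupts are
 * enabled: stop async posting, drive the bootstrap mailbox by polling, then
 * resume async posting.
 */
static int demo_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	rc = lpfc_sli4_async_mbox_block(phba);	/* stop async posting first */
	if (rc)
		return MBXERR_ERROR;
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq); /* poll bootstrap mbox */
	lpfc_sli4_async_mbox_unblock(phba);	/* resume async posting */
	return rc;
}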
  8847. /**
  8848. * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
  8849. * @phba: Pointer to HBA context object.
  8850. * @mboxq: Pointer to mailbox object.
  8851. *
8852. * The function waits for the bootstrap mailbox register ready bit from the
8853. * port for up to the regular mailbox command timeout value.
  8854. *
  8855. * 0 - no timeout on waiting for bootstrap mailbox register ready.
  8856. * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
  8857. * is in an unrecoverable state.
  8858. **/
  8859. static int
  8860. lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
  8861. {
  8862. uint32_t db_ready;
  8863. unsigned long timeout;
  8864. struct lpfc_register bmbx_reg;
  8865. struct lpfc_register portstat_reg = {-1};
  8866. /* Sanity check - there is no point to wait if the port is in an
  8867. * unrecoverable state.
  8868. */
  8869. if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
  8870. LPFC_SLI_INTF_IF_TYPE_2) {
  8871. if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
  8872. &portstat_reg.word0) ||
  8873. lpfc_sli4_unrecoverable_port(&portstat_reg)) {
  8874. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  8875. "3858 Skipping bmbx ready because "
  8876. "Port Status x%x\n",
  8877. portstat_reg.word0);
  8878. return MBXERR_ERROR;
  8879. }
  8880. }
  8881. timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
  8882. * 1000) + jiffies;
  8883. do {
  8884. bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
  8885. db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
  8886. if (!db_ready)
  8887. mdelay(2);
  8888. if (time_after(jiffies, timeout))
  8889. return MBXERR_ERROR;
  8890. } while (!db_ready);
  8891. return 0;
  8892. }
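/*
 * Editor's sketch (generic pattern, poll_until is an invented helper): the
 * routine above uses the standard jiffies-deadline idiom: compute an
 * absolute deadline, poll the condition with a short delay between reads,
 * and give up once time_after() reports that the deadline has passed.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int poll_until(bool (*ready)(void *arg), void *arg, unsigned int tmo_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);

	do {
		if (ready(arg))
			return 0;		/* condition met in time */
		mdelay(2);			/* brief busy-wait, as above */
	} while (!time_after(jiffies, deadline));

	return -ETIMEDOUT;
}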
  8893. /**
  8894. * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
  8895. * @phba: Pointer to HBA context object.
  8896. * @mboxq: Pointer to mailbox object.
  8897. *
  8898. * The function posts a mailbox to the port. The mailbox is expected
8899. * to be completely filled in and ready for the port to operate on it.
  8900. * This routine executes a synchronous completion operation on the
  8901. * mailbox by polling for its completion.
  8902. *
  8903. * The caller must not be holding any locks when calling this routine.
  8904. *
  8905. * Returns:
  8906. * MBX_SUCCESS - mailbox posted successfully
  8907. * Any of the MBX error values.
  8908. **/
  8909. static int
  8910. lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
  8911. {
  8912. int rc = MBX_SUCCESS;
  8913. unsigned long iflag;
  8914. uint32_t mcqe_status;
  8915. uint32_t mbx_cmnd;
  8916. struct lpfc_sli *psli = &phba->sli;
  8917. struct lpfc_mqe *mb = &mboxq->u.mqe;
  8918. struct lpfc_bmbx_create *mbox_rgn;
  8919. struct dma_address *dma_address;
  8920. /*
  8921. * Only one mailbox can be active to the bootstrap mailbox region
  8922. * at a time and there is no queueing provided.
  8923. */
  8924. spin_lock_irqsave(&phba->hbalock, iflag);
  8925. if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
  8926. spin_unlock_irqrestore(&phba->hbalock, iflag);
  8927. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  8928. "(%d):2532 Mailbox command x%x (x%x/x%x) "
  8929. "cannot issue Data: x%x x%x\n",
  8930. mboxq->vport ? mboxq->vport->vpi : 0,
  8931. mboxq->u.mb.mbxCommand,
  8932. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  8933. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  8934. psli->sli_flag, MBX_POLL);
  8935. return MBXERR_ERROR;
  8936. }
  8937. /* The server grabs the token and owns it until release */
  8938. psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
  8939. phba->sli.mbox_active = mboxq;
  8940. spin_unlock_irqrestore(&phba->hbalock, iflag);
8941. /* wait for the bootstrap mbox register to report ready */
  8942. rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
  8943. if (rc)
  8944. goto exit;
  8945. /*
  8946. * Initialize the bootstrap memory region to avoid stale data areas
  8947. * in the mailbox post. Then copy the caller's mailbox contents to
  8948. * the bmbx mailbox region.
  8949. */
  8950. mbx_cmnd = bf_get(lpfc_mqe_command, mb);
  8951. memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
  8952. lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
  8953. sizeof(struct lpfc_mqe));
  8954. /* Post the high mailbox dma address to the port and wait for ready. */
  8955. dma_address = &phba->sli4_hba.bmbx.dma_address;
  8956. writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
  8957. /* wait for bootstrap mbox register for hi-address write done */
  8958. rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
  8959. if (rc)
  8960. goto exit;
  8961. /* Post the low mailbox dma address to the port. */
  8962. writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
  8963. /* wait for bootstrap mbox register for low address write done */
  8964. rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
  8965. if (rc)
  8966. goto exit;
  8967. /*
  8968. * Read the CQ to ensure the mailbox has completed.
  8969. * If so, update the mailbox status so that the upper layers
  8970. * can complete the request normally.
  8971. */
  8972. lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
  8973. sizeof(struct lpfc_mqe));
  8974. mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
  8975. lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
  8976. sizeof(struct lpfc_mcqe));
  8977. mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
  8978. /*
  8979. * When the CQE status indicates a failure and the mailbox status
  8980. * indicates success then copy the CQE status into the mailbox status
  8981. * (and prefix it with x4000).
  8982. */
  8983. if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
  8984. if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
  8985. bf_set(lpfc_mqe_status, mb,
  8986. (LPFC_MBX_ERROR_RANGE | mcqe_status));
  8987. rc = MBXERR_ERROR;
  8988. } else
  8989. lpfc_sli4_swap_str(phba, mboxq);
  8990. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  8991. "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
  8992. "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
  8993. " x%x x%x CQ: x%x x%x x%x x%x\n",
  8994. mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
  8995. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  8996. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  8997. bf_get(lpfc_mqe_status, mb),
  8998. mb->un.mb_words[0], mb->un.mb_words[1],
  8999. mb->un.mb_words[2], mb->un.mb_words[3],
  9000. mb->un.mb_words[4], mb->un.mb_words[5],
  9001. mb->un.mb_words[6], mb->un.mb_words[7],
  9002. mb->un.mb_words[8], mb->un.mb_words[9],
  9003. mb->un.mb_words[10], mb->un.mb_words[11],
  9004. mb->un.mb_words[12], mboxq->mcqe.word0,
  9005. mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
  9006. mboxq->mcqe.trailer);
  9007. exit:
9008. /* We are holding the token, no lock needed to release it */
  9009. spin_lock_irqsave(&phba->hbalock, iflag);
  9010. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  9011. phba->sli.mbox_active = NULL;
  9012. spin_unlock_irqrestore(&phba->hbalock, iflag);
  9013. return rc;
  9014. }
  9015. /**
  9016. * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
  9017. * @phba: Pointer to HBA context object.
  9018. * @mboxq: Pointer to mailbox object.
* @flag: Flag indicating how the mailbox needs to be processed.
  9020. *
  9021. * This function is called by discovery code and HBA management code to submit
  9022. * a mailbox command to firmware with SLI-4 interface spec.
  9023. *
* Return codes: the caller owns the mailbox command after the function
* returns.
  9026. **/
  9027. static int
  9028. lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
  9029. uint32_t flag)
  9030. {
  9031. struct lpfc_sli *psli = &phba->sli;
  9032. unsigned long iflags;
  9033. int rc;
/* Dump the mailbox command at issue time if idiag dumping is set up */
  9035. lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
  9036. rc = lpfc_mbox_dev_check(phba);
  9037. if (unlikely(rc)) {
  9038. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  9039. "(%d):2544 Mailbox command x%x (x%x/x%x) "
  9040. "cannot issue Data: x%x x%x\n",
  9041. mboxq->vport ? mboxq->vport->vpi : 0,
  9042. mboxq->u.mb.mbxCommand,
  9043. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  9044. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  9045. psli->sli_flag, flag);
  9046. goto out_not_finished;
  9047. }
  9048. /* Detect polling mode and jump to a handler */
  9049. if (!phba->sli4_hba.intr_enable) {
  9050. if (flag == MBX_POLL)
  9051. rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
  9052. else
  9053. rc = -EIO;
  9054. if (rc != MBX_SUCCESS)
  9055. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  9056. "(%d):2541 Mailbox command x%x "
  9057. "(x%x/x%x) failure: "
  9058. "mqe_sta: x%x mcqe_sta: x%x/x%x "
  9059. "Data: x%x x%x\n",
  9060. mboxq->vport ? mboxq->vport->vpi : 0,
  9061. mboxq->u.mb.mbxCommand,
  9062. lpfc_sli_config_mbox_subsys_get(phba,
  9063. mboxq),
  9064. lpfc_sli_config_mbox_opcode_get(phba,
  9065. mboxq),
  9066. bf_get(lpfc_mqe_status, &mboxq->u.mqe),
  9067. bf_get(lpfc_mcqe_status, &mboxq->mcqe),
  9068. bf_get(lpfc_mcqe_ext_status,
  9069. &mboxq->mcqe),
  9070. psli->sli_flag, flag);
  9071. return rc;
  9072. } else if (flag == MBX_POLL) {
  9073. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  9074. "(%d):2542 Try to issue mailbox command "
  9075. "x%x (x%x/x%x) synchronously ahead of async "
  9076. "mailbox command queue: x%x x%x\n",
  9077. mboxq->vport ? mboxq->vport->vpi : 0,
  9078. mboxq->u.mb.mbxCommand,
  9079. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  9080. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  9081. psli->sli_flag, flag);
  9082. /* Try to block the asynchronous mailbox posting */
  9083. rc = lpfc_sli4_async_mbox_block(phba);
  9084. if (!rc) {
  9085. /* Successfully blocked, now issue sync mbox cmd */
  9086. rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
  9087. if (rc != MBX_SUCCESS)
  9088. lpfc_printf_log(phba, KERN_WARNING,
  9089. LOG_MBOX | LOG_SLI,
  9090. "(%d):2597 Sync Mailbox command "
  9091. "x%x (x%x/x%x) failure: "
  9092. "mqe_sta: x%x mcqe_sta: x%x/x%x "
  9093. "Data: x%x x%x\n",
  9094. mboxq->vport ? mboxq->vport->vpi : 0,
  9095. mboxq->u.mb.mbxCommand,
  9096. lpfc_sli_config_mbox_subsys_get(phba,
  9097. mboxq),
  9098. lpfc_sli_config_mbox_opcode_get(phba,
  9099. mboxq),
  9100. bf_get(lpfc_mqe_status, &mboxq->u.mqe),
  9101. bf_get(lpfc_mcqe_status, &mboxq->mcqe),
  9102. bf_get(lpfc_mcqe_ext_status,
  9103. &mboxq->mcqe),
  9104. psli->sli_flag, flag);
  9105. /* Unblock the async mailbox posting afterward */
  9106. lpfc_sli4_async_mbox_unblock(phba);
  9107. }
  9108. return rc;
  9109. }
  9110. /* Now, interrupt mode asynchronous mailbox command */
  9111. rc = lpfc_mbox_cmd_check(phba, mboxq);
  9112. if (rc) {
  9113. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  9114. "(%d):2543 Mailbox command x%x (x%x/x%x) "
  9115. "cannot issue Data: x%x x%x\n",
  9116. mboxq->vport ? mboxq->vport->vpi : 0,
  9117. mboxq->u.mb.mbxCommand,
  9118. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  9119. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  9120. psli->sli_flag, flag);
  9121. goto out_not_finished;
  9122. }
  9123. /* Put the mailbox command to the driver internal FIFO */
  9124. psli->slistat.mbox_busy++;
  9125. spin_lock_irqsave(&phba->hbalock, iflags);
  9126. lpfc_mbox_put(phba, mboxq);
  9127. spin_unlock_irqrestore(&phba->hbalock, iflags);
  9128. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  9129. "(%d):0354 Mbox cmd issue - Enqueue Data: "
  9130. "x%x (x%x/x%x) x%x x%x x%x x%x\n",
  9131. mboxq->vport ? mboxq->vport->vpi : 0xffffff,
  9132. bf_get(lpfc_mqe_command, &mboxq->u.mqe),
  9133. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  9134. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  9135. mboxq->u.mb.un.varUnregLogin.rpi,
  9136. phba->pport->port_state,
  9137. psli->sli_flag, MBX_NOWAIT);
/* Wake up the worker thread to post the mailbox command from the head of the queue */
  9139. lpfc_worker_wake_up(phba);
  9140. return MBX_BUSY;
  9141. out_not_finished:
  9142. return MBX_NOT_FINISHED;
  9143. }
  9144. /**
  9145. * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
  9146. * @phba: Pointer to HBA context object.
  9147. *
  9148. * This function is called by worker thread to send a mailbox command to
  9149. * SLI4 HBA firmware.
  9150. *
  9151. **/
  9152. int
  9153. lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
  9154. {
  9155. struct lpfc_sli *psli = &phba->sli;
  9156. LPFC_MBOXQ_t *mboxq;
  9157. int rc = MBX_SUCCESS;
  9158. unsigned long iflags;
  9159. struct lpfc_mqe *mqe;
  9160. uint32_t mbx_cmnd;
/* Check interrupt mode before posting an async mailbox command */
  9162. if (unlikely(!phba->sli4_hba.intr_enable))
  9163. return MBX_NOT_FINISHED;
  9164. /* Check for mailbox command service token */
  9165. spin_lock_irqsave(&phba->hbalock, iflags);
  9166. if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
  9167. spin_unlock_irqrestore(&phba->hbalock, iflags);
  9168. return MBX_NOT_FINISHED;
  9169. }
  9170. if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
  9171. spin_unlock_irqrestore(&phba->hbalock, iflags);
  9172. return MBX_NOT_FINISHED;
  9173. }
  9174. if (unlikely(phba->sli.mbox_active)) {
  9175. spin_unlock_irqrestore(&phba->hbalock, iflags);
  9176. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  9177. "0384 There is pending active mailbox cmd\n");
  9178. return MBX_NOT_FINISHED;
  9179. }
  9180. /* Take the mailbox command service token */
  9181. psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
  9182. /* Get the next mailbox command from head of queue */
  9183. mboxq = lpfc_mbox_get(phba);
/* If no more mailbox commands are waiting to be posted, we're done */
  9185. if (!mboxq) {
  9186. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  9187. spin_unlock_irqrestore(&phba->hbalock, iflags);
  9188. return MBX_SUCCESS;
  9189. }
  9190. phba->sli.mbox_active = mboxq;
  9191. spin_unlock_irqrestore(&phba->hbalock, iflags);
  9192. /* Check device readiness for posting mailbox command */
  9193. rc = lpfc_mbox_dev_check(phba);
  9194. if (unlikely(rc))
  9195. /* Driver clean routine will clean up pending mailbox */
  9196. goto out_not_finished;
  9197. /* Prepare the mbox command to be posted */
  9198. mqe = &mboxq->u.mqe;
  9199. mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
  9200. /* Start timer for the mbox_tmo and log some mailbox post messages */
  9201. mod_timer(&psli->mbox_tmo, (jiffies +
  9202. msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
  9203. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  9204. "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
  9205. "x%x x%x\n",
  9206. mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
  9207. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  9208. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  9209. phba->pport->port_state, psli->sli_flag);
  9210. if (mbx_cmnd != MBX_HEARTBEAT) {
  9211. if (mboxq->vport) {
  9212. lpfc_debugfs_disc_trc(mboxq->vport,
  9213. LPFC_DISC_TRC_MBOX_VPORT,
  9214. "MBOX Send vport: cmd:x%x mb:x%x x%x",
  9215. mbx_cmnd, mqe->un.mb_words[0],
  9216. mqe->un.mb_words[1]);
  9217. } else {
  9218. lpfc_debugfs_disc_trc(phba->pport,
  9219. LPFC_DISC_TRC_MBOX,
  9220. "MBOX Send: cmd:x%x mb:x%x x%x",
  9221. mbx_cmnd, mqe->un.mb_words[0],
  9222. mqe->un.mb_words[1]);
  9223. }
  9224. }
  9225. psli->slistat.mbox_cmd++;
  9226. /* Post the mailbox command to the port */
  9227. rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
  9228. if (rc != MBX_SUCCESS) {
  9229. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  9230. "(%d):2533 Mailbox command x%x (x%x/x%x) "
  9231. "cannot issue Data: x%x x%x\n",
  9232. mboxq->vport ? mboxq->vport->vpi : 0,
  9233. mboxq->u.mb.mbxCommand,
  9234. lpfc_sli_config_mbox_subsys_get(phba, mboxq),
  9235. lpfc_sli_config_mbox_opcode_get(phba, mboxq),
  9236. psli->sli_flag, MBX_NOWAIT);
  9237. goto out_not_finished;
  9238. }
  9239. return rc;
  9240. out_not_finished:
  9241. spin_lock_irqsave(&phba->hbalock, iflags);
  9242. if (phba->sli.mbox_active) {
  9243. mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
  9244. __lpfc_mbox_cmpl_put(phba, mboxq);
  9245. /* Release the token */
  9246. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  9247. phba->sli.mbox_active = NULL;
  9248. }
  9249. spin_unlock_irqrestore(&phba->hbalock, iflags);
  9250. return MBX_NOT_FINISHED;
  9251. }
  9252. /**
  9253. * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
  9254. * @phba: Pointer to HBA context object.
  9255. * @pmbox: Pointer to mailbox object.
* @flag: Flag indicating how the mailbox needs to be processed.
  9257. *
  9258. * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
  9259. * the API jump table function pointer from the lpfc_hba struct.
  9260. *
* Return codes: the caller owns the mailbox command after the function
* returns.
  9263. **/
  9264. int
  9265. lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
  9266. {
  9267. return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
  9268. }
  9269. /**
  9270. * lpfc_mbox_api_table_setup - Set up mbox api function jump table
  9271. * @phba: The hba struct for which this call is being executed.
  9272. * @dev_grp: The HBA PCI-Device group number.
  9273. *
  9274. * This routine sets up the mbox interface API function jump table in @phba
  9275. * struct.
  9276. * Returns: 0 - success, -ENODEV - failure.
  9277. **/
  9278. int
  9279. lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  9280. {
  9281. switch (dev_grp) {
  9282. case LPFC_PCI_DEV_LP:
  9283. phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
  9284. phba->lpfc_sli_handle_slow_ring_event =
  9285. lpfc_sli_handle_slow_ring_event_s3;
  9286. phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
  9287. phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
  9288. phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
  9289. break;
  9290. case LPFC_PCI_DEV_OC:
  9291. phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
  9292. phba->lpfc_sli_handle_slow_ring_event =
  9293. lpfc_sli_handle_slow_ring_event_s4;
  9294. phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
  9295. phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
  9296. phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
  9297. break;
  9298. default:
  9299. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  9300. "1420 Invalid HBA PCI-device group: 0x%x\n",
  9301. dev_grp);
  9302. return -ENODEV;
  9303. }
  9304. return 0;
  9305. }
  9306. /**
  9307. * __lpfc_sli_ringtx_put - Add an iocb to the txq
  9308. * @phba: Pointer to HBA context object.
  9309. * @pring: Pointer to driver SLI ring object.
  9310. * @piocb: Pointer to address of newly added command iocb.
  9311. *
  9312. * This function is called with hbalock held for SLI3 ports or
  9313. * the ring lock held for SLI4 ports to add a command
  9314. * iocb to the txq when SLI layer cannot submit the command iocb
  9315. * to the ring.
  9316. **/
  9317. void
  9318. __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  9319. struct lpfc_iocbq *piocb)
  9320. {
  9321. if (phba->sli_rev == LPFC_SLI_REV4)
  9322. lockdep_assert_held(&pring->ring_lock);
  9323. else
  9324. lockdep_assert_held(&phba->hbalock);
  9325. /* Insert the caller's iocb in the txq tail for later processing. */
  9326. list_add_tail(&piocb->list, &pring->txq);
  9327. }
  9328. /**
  9329. * lpfc_sli_next_iocb - Get the next iocb in the txq
  9330. * @phba: Pointer to HBA context object.
  9331. * @pring: Pointer to driver SLI ring object.
  9332. * @piocb: Pointer to address of newly added command iocb.
  9333. *
* This function is called with the hbalock held before a new
* iocb is submitted to the firmware. It drains the txq so that
* queued iocbs are flushed to the firmware before any new iocbs
* are submitted.
* If there are iocbs in the txq which need to be submitted to the
* firmware, lpfc_sli_next_iocb dequeues the first element of the
* txq and returns it.
* If the txq is empty, the function returns *piocb and sets *piocb
* to NULL. The caller needs to check *piocb to find out whether
* there are more commands to submit.
  9344. **/
  9345. static struct lpfc_iocbq *
  9346. lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  9347. struct lpfc_iocbq **piocb)
  9348. {
  9349. struct lpfc_iocbq * nextiocb;
  9350. lockdep_assert_held(&phba->hbalock);
  9351. nextiocb = lpfc_sli_ringtx_get(phba, pring);
  9352. if (!nextiocb) {
  9353. nextiocb = *piocb;
  9354. *piocb = NULL;
  9355. }
  9356. return nextiocb;
  9357. }
  9358. /**
  9359. * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
  9360. * @phba: Pointer to HBA context object.
  9361. * @ring_number: SLI ring number to issue iocb on.
  9362. * @piocb: Pointer to command iocb.
  9363. * @flag: Flag indicating if this command can be put into txq.
  9364. *
  9365. * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
  9366. * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
  9367. * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
  9368. * flag is turned on, the function returns IOCB_ERROR. When the link is down,
  9369. * this function allows only iocbs for posting buffers. This function finds
  9370. * next available slot in the command ring and posts the command to the
  9371. * available slot and writes the port attention register to request HBA start
  9372. * processing new iocb. If there is no slot available in the ring and
  9373. * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
  9374. * the function returns IOCB_BUSY.
  9375. *
  9376. * This function is called with hbalock held. The function will return success
* after it successfully submits the iocb to firmware or after adding to the
  9378. * txq.
  9379. **/
  9380. static int
  9381. __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
  9382. struct lpfc_iocbq *piocb, uint32_t flag)
  9383. {
  9384. struct lpfc_iocbq *nextiocb;
  9385. IOCB_t *iocb;
  9386. struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
  9387. lockdep_assert_held(&phba->hbalock);
  9388. if (piocb->cmd_cmpl && (!piocb->vport) &&
  9389. (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
  9390. (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
  9391. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  9392. "1807 IOCB x%x failed. No vport\n",
  9393. piocb->iocb.ulpCommand);
  9394. dump_stack();
  9395. return IOCB_ERROR;
  9396. }
  9397. /* If the PCI channel is in offline state, do not post iocbs. */
  9398. if (unlikely(pci_channel_offline(phba->pcidev)))
  9399. return IOCB_ERROR;
  9400. /* If HBA has a deferred error attention, fail the iocb. */
  9401. if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
  9402. return IOCB_ERROR;
  9403. /*
  9404. * We should never get an IOCB if we are in a < LINK_DOWN state
  9405. */
  9406. if (unlikely(phba->link_state < LPFC_LINK_DOWN))
  9407. return IOCB_ERROR;
  9408. /*
* Check to see if we are blocking IOCB processing because of an
* outstanding event.
  9411. */
  9412. if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
  9413. goto iocb_busy;
  9414. if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
  9415. /*
  9416. * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
  9417. * can be issued if the link is not up.
  9418. */
  9419. switch (piocb->iocb.ulpCommand) {
  9420. case CMD_QUE_RING_BUF_CN:
  9421. case CMD_QUE_RING_BUF64_CN:
  9422. /*
  9423. * For IOCBs, like QUE_RING_BUF, that have no rsp ring
  9424. * completion, cmd_cmpl MUST be 0.
  9425. */
  9426. if (piocb->cmd_cmpl)
  9427. piocb->cmd_cmpl = NULL;
  9428. fallthrough;
  9429. case CMD_CREATE_XRI_CR:
  9430. case CMD_CLOSE_XRI_CN:
  9431. case CMD_CLOSE_XRI_CX:
  9432. break;
  9433. default:
  9434. goto iocb_busy;
  9435. }
  9436. /*
  9437. * For FCP commands, we must be in a state where we can process link
  9438. * attention events.
  9439. */
  9440. } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
  9441. !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
  9442. goto iocb_busy;
  9443. }
  9444. while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
  9445. (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
  9446. lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
  9447. if (iocb)
  9448. lpfc_sli_update_ring(phba, pring);
  9449. else
  9450. lpfc_sli_update_full_ring(phba, pring);
  9451. if (!piocb)
  9452. return IOCB_SUCCESS;
  9453. goto out_busy;
  9454. iocb_busy:
  9455. pring->stats.iocb_cmd_delay++;
  9456. out_busy:
  9457. if (!(flag & SLI_IOCB_RET_IOCB)) {
  9458. __lpfc_sli_ringtx_put(phba, pring, piocb);
  9459. return IOCB_SUCCESS;
  9460. }
  9461. return IOCB_BUSY;
  9462. }
  9463. /**
  9464. * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
  9465. * @phba: Pointer to HBA context object.
  9466. * @ring_number: SLI ring number to issue wqe on.
  9467. * @piocb: Pointer to command iocb.
  9468. * @flag: Flag indicating if this command can be put into txq.
  9469. *
  9470. * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
  9471. * send an iocb command to an HBA with SLI-3 interface spec.
  9472. *
  9473. * This function takes the hbalock before invoking the lockless version.
* The function will return success after it successfully submits the wqe to
  9475. * firmware or after adding to the txq.
  9476. **/
  9477. static int
  9478. __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
  9479. struct lpfc_iocbq *piocb, uint32_t flag)
  9480. {
  9481. unsigned long iflags;
  9482. int rc;
  9483. spin_lock_irqsave(&phba->hbalock, iflags);
  9484. rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
  9485. spin_unlock_irqrestore(&phba->hbalock, iflags);
  9486. return rc;
  9487. }
  9488. /**
  9489. * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
  9490. * @phba: Pointer to HBA context object.
  9491. * @ring_number: SLI ring number to issue wqe on.
  9492. * @piocb: Pointer to command iocb.
  9493. * @flag: Flag indicating if this command can be put into txq.
  9494. *
  9495. * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
* a wqe command to an HBA with SLI-4 interface spec.
  9497. *
  9498. * This function is a lockless version. The function will return success
* after it successfully submits the wqe to firmware or after adding to the
  9500. * txq.
  9501. **/
  9502. static int
  9503. __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
  9504. struct lpfc_iocbq *piocb, uint32_t flag)
  9505. {
  9506. struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
  9507. lpfc_prep_embed_io(phba, lpfc_cmd);
  9508. return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
  9509. }
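/**
* lpfc_prep_embed_io - Build the BDE portion of an FCP IO WQE
* @phba: Pointer to HBA context object.
* @lpfc_cmd: Pointer to the IO buffer whose WQE is being prepared.
*
* When fcp_embed_io is enabled, the FCP_CMND payload is copied into
* words 18-29 of the WQE as an immediate BDE; otherwise an inline
* 64-bit BDE pointing at the first SGE is built. The routine also sets
* the VMID tags (CS_CTL priority or application id) when the IO is
* marked with LPFC_IO_VMID.
**/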
  9510. void
  9511. lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
  9512. {
  9513. struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
  9514. union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
  9515. struct sli4_sge_le *sgl;
  9516. u32 type_size;
  9517. /* 128 byte wqe support here */
  9518. sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
  9519. if (phba->fcp_embed_io) {
  9520. struct fcp_cmnd *fcp_cmnd;
  9521. u32 *ptr;
  9522. fcp_cmnd = lpfc_cmd->fcp_cmnd;
  9523. /* Word 0-2 - FCP_CMND */
  9524. type_size = le32_to_cpu(sgl->sge_len);
  9525. type_size |= ULP_BDE64_TYPE_BDE_IMMED;
  9526. wqe->generic.bde.tus.w = type_size;
  9527. wqe->generic.bde.addrHigh = 0;
  9528. wqe->generic.bde.addrLow = 72; /* Word 18 */
  9529. bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
  9530. bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
  9531. /* Word 18-29 FCP CMND Payload */
  9532. ptr = &wqe->words[18];
  9533. lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
  9534. } else {
  9535. /* Word 0-2 - Inline BDE */
  9536. wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  9537. wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
  9538. wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
  9539. wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);
  9540. /* Word 10 */
  9541. bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
  9542. bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
  9543. }
  9544. /* add the VMID tags as per switch response */
  9545. if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
  9546. if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
  9547. bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
  9548. bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
  9549. (piocb->vmid_tag.cs_ctl_vmid));
  9550. } else if (phba->cfg_vmid_app_header) {
  9551. bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
  9552. bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
  9553. wqe->words[31] = piocb->vmid_tag.app_id;
  9554. }
  9555. }
  9556. }
  9557. /**
  9558. * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
  9559. * @phba: Pointer to HBA context object.
  9560. * @ring_number: SLI ring number to issue iocb on.
  9561. * @piocb: Pointer to command iocb.
  9562. * @flag: Flag indicating if this command can be put into txq.
  9563. *
  9564. * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
  9565. * an iocb command to an HBA with SLI-4 interface spec.
  9566. *
  9567. * This function is called with ringlock held. The function will return success
* after it successfully submits the iocb to firmware or after adding to the
  9569. * txq.
  9570. **/
  9571. static int
  9572. __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
  9573. struct lpfc_iocbq *piocb, uint32_t flag)
  9574. {
  9575. struct lpfc_sglq *sglq;
  9576. union lpfc_wqe128 *wqe;
  9577. struct lpfc_queue *wq;
  9578. struct lpfc_sli_ring *pring;
  9579. u32 ulp_command = get_job_cmnd(phba, piocb);
  9580. /* Get the WQ */
  9581. if ((piocb->cmd_flag & LPFC_IO_FCP) ||
  9582. (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
  9583. wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
  9584. } else {
  9585. wq = phba->sli4_hba.els_wq;
  9586. }
  9587. /* Get corresponding ring */
  9588. pring = wq->pring;
  9589. /*
* The WQE can be either 64 or 128 bytes.
  9591. */
  9592. lockdep_assert_held(&pring->ring_lock);
  9593. wqe = &piocb->wqe;
  9594. if (piocb->sli4_xritag == NO_XRI) {
  9595. if (ulp_command == CMD_ABORT_XRI_CX)
  9596. sglq = NULL;
  9597. else {
  9598. sglq = __lpfc_sli_get_els_sglq(phba, piocb);
  9599. if (!sglq) {
  9600. if (!(flag & SLI_IOCB_RET_IOCB)) {
  9601. __lpfc_sli_ringtx_put(phba,
  9602. pring,
  9603. piocb);
  9604. return IOCB_SUCCESS;
  9605. } else {
  9606. return IOCB_BUSY;
  9607. }
  9608. }
  9609. }
  9610. } else if (piocb->cmd_flag & LPFC_IO_FCP) {
  9611. /* These IO's already have an XRI and a mapped sgl. */
  9612. sglq = NULL;
  9613. }
  9614. else {
  9615. /*
* This is a continuation of a command (CX), so this
* sglq is on the active list.
  9618. */
  9619. sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
  9620. if (!sglq)
  9621. return IOCB_ERROR;
  9622. }
  9623. if (sglq) {
  9624. piocb->sli4_lxritag = sglq->sli4_lxritag;
  9625. piocb->sli4_xritag = sglq->sli4_xritag;
  9626. /* ABTS sent by initiator to CT exchange, the
  9627. * RX_ID field will be filled with the newly
  9628. * allocated responder XRI.
  9629. */
  9630. if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
  9631. piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
  9632. bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
  9633. piocb->sli4_xritag);
  9634. bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
  9635. piocb->sli4_xritag);
  9636. if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
  9637. return IOCB_ERROR;
  9638. }
  9639. if (lpfc_sli4_wq_put(wq, wqe))
  9640. return IOCB_ERROR;
  9641. lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
  9642. return 0;
  9643. }
  9644. /*
  9645. * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
  9646. *
* This routine wraps the actual FCP I/O function pointer from the
* lpfc_hba struct, issuing a WQE for SLI-4 or an IOCB for SLI-3.
  9650. *
  9651. * Return codes:
  9652. * IOCB_ERROR - Error
  9653. * IOCB_SUCCESS - Success
  9654. * IOCB_BUSY - Busy
  9655. **/
  9656. int
  9657. lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
  9658. struct lpfc_iocbq *piocb, uint32_t flag)
  9659. {
  9660. return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
  9661. }
  9662. /*
  9663. * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
  9664. *
* This routine wraps the actual lockless IOCB-issuing function
* pointer from the lpfc_hba struct.
  9667. *
  9668. * Return codes:
  9669. * IOCB_ERROR - Error
  9670. * IOCB_SUCCESS - Success
  9671. * IOCB_BUSY - Busy
  9672. **/
  9673. int
  9674. __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
  9675. struct lpfc_iocbq *piocb, uint32_t flag)
  9676. {
  9677. return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
  9678. }
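/**
* __lpfc_sli_prep_els_req_rsp_s3 - SLI3 prep for an ELS request or response
* @cmdiocbq: Pointer to the command iocb being prepared.
* @vport: Pointer to the virtual port sending the ELS traffic.
* @bmp: Pointer to the DMA buffer holding the buffer pointer list.
* @cmd_size: Size of the ELS command payload.
* @did: Destination ID of the remote port.
* @elscmd: ELS command code.
* @tmo: Command timeout value.
* @expect_rsp: Non-zero to build ELS_REQUEST64_CR, zero for XMIT_ELS_RSP64_CX.
*
* Fills in the IOCB fields (BPL, DID, command, class) and, when NPIV is
* enabled, the VPI context and CT bits for the ELS exchange.
**/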
  9679. static void
  9680. __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
  9681. struct lpfc_vport *vport,
  9682. struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
  9683. u32 elscmd, u8 tmo, u8 expect_rsp)
  9684. {
  9685. struct lpfc_hba *phba = vport->phba;
  9686. IOCB_t *cmd;
  9687. cmd = &cmdiocbq->iocb;
  9688. memset(cmd, 0, sizeof(*cmd));
  9689. cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  9690. cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
  9691. cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  9692. if (expect_rsp) {
  9693. cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
  9694. cmd->un.elsreq64.remoteID = did; /* DID */
  9695. cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
  9696. cmd->ulpTimeout = tmo;
  9697. } else {
  9698. cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
  9699. cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
  9700. cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
  9701. cmd->ulpPU = PARM_NPIV_DID;
  9702. }
  9703. cmd->ulpBdeCount = 1;
  9704. cmd->ulpLe = 1;
  9705. cmd->ulpClass = CLASS3;
  9706. /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
  9707. if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
  9708. if (expect_rsp) {
  9709. cmd->un.elsreq64.myID = vport->fc_myDID;
  9710. /* For ELS_REQUEST64_CR, use the VPI by default */
  9711. cmd->ulpContext = phba->vpi_ids[vport->vpi];
  9712. }
  9713. cmd->ulpCt_h = 0;
  9714. /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
  9715. if (elscmd == ELS_CMD_ECHO)
  9716. cmd->ulpCt_l = 0; /* context = invalid RPI */
  9717. else
  9718. cmd->ulpCt_l = 1; /* context = VPI */
  9719. }
  9720. }
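/**
* __lpfc_sli_prep_els_req_rsp_s4 - SLI4 prep for an ELS request or response
* @cmdiocbq: Pointer to the command iocb being prepared.
* @vport: Pointer to the virtual port sending the ELS traffic.
* @bmp: Pointer to the DMA buffer holding the ELS payload.
* @cmd_size: Size of the ELS command payload.
* @did: Destination ID of the remote port.
* @elscmd: ELS command code, used to select the ELS_ID field.
* @tmo: Command timeout value.
* @expect_rsp: Non-zero to build ELS_REQUEST64_WQE, zero for XMIT_ELS_RSP64_WQE.
*
* Builds the WQE: BDE in words 0-2, payload lengths, DID, ELS_ID, timeout,
* request tag and class, plus the SID/VPI/CT fields when NPIV or pt2pt is
* in effect.
**/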
  9721. static void
  9722. __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
  9723. struct lpfc_vport *vport,
  9724. struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
  9725. u32 elscmd, u8 tmo, u8 expect_rsp)
  9726. {
  9727. struct lpfc_hba *phba = vport->phba;
  9728. union lpfc_wqe128 *wqe;
  9729. struct ulp_bde64_le *bde;
  9730. u8 els_id;
  9731. wqe = &cmdiocbq->wqe;
  9732. memset(wqe, 0, sizeof(*wqe));
  9733. /* Word 0 - 2 BDE */
  9734. bde = (struct ulp_bde64_le *)&wqe->generic.bde;
  9735. bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
  9736. bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
  9737. bde->type_size = cpu_to_le32(cmd_size);
  9738. bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
  9739. if (expect_rsp) {
  9740. bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
  9741. /* Transfer length */
  9742. wqe->els_req.payload_len = cmd_size;
  9743. wqe->els_req.max_response_payload_len = FCELSSIZE;
  9744. /* DID */
  9745. bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
  9746. /* Word 11 - ELS_ID */
  9747. switch (elscmd) {
  9748. case ELS_CMD_PLOGI:
  9749. els_id = LPFC_ELS_ID_PLOGI;
  9750. break;
  9751. case ELS_CMD_FLOGI:
  9752. els_id = LPFC_ELS_ID_FLOGI;
  9753. break;
  9754. case ELS_CMD_LOGO:
  9755. els_id = LPFC_ELS_ID_LOGO;
  9756. break;
  9757. case ELS_CMD_FDISC:
  9758. if (!vport->fc_myDID) {
  9759. els_id = LPFC_ELS_ID_FDISC;
  9760. break;
  9761. }
  9762. fallthrough;
  9763. default:
  9764. els_id = LPFC_ELS_ID_DEFAULT;
  9765. break;
  9766. }
  9767. bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
  9768. } else {
  9769. /* DID */
  9770. bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
  9771. /* Transfer length */
  9772. wqe->xmit_els_rsp.response_payload_len = cmd_size;
  9773. bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
  9774. CMD_XMIT_ELS_RSP64_WQE);
  9775. }
  9776. bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
  9777. bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
  9778. bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
  9779. /* If we have NPIV enabled, we want to send ELS traffic by VPI.
  9780. * For SLI4, since the driver controls VPIs we also want to include
  9781. * all ELS pt2pt protocol traffic as well.
  9782. */
  9783. if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
  9784. test_bit(FC_PT2PT, &vport->fc_flag)) {
  9785. if (expect_rsp) {
  9786. bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
  9787. /* For ELS_REQUEST64_WQE, use the VPI by default */
  9788. bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
  9789. phba->vpi_ids[vport->vpi]);
  9790. }
  9791. /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
  9792. if (elscmd == ELS_CMD_ECHO)
  9793. bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
  9794. else
  9795. bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
  9796. }
  9797. }
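/**
* lpfc_sli_prep_els_req_rsp - Wrapper for preparing an ELS request or response
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to the command iocb being prepared.
* @vport: Pointer to the virtual port sending the ELS traffic.
* @bmp: Pointer to the DMA buffer holding the payload.
* @cmd_size: Size of the ELS command payload.
* @did: Destination ID of the remote port.
* @elscmd: ELS command code.
* @tmo: Command timeout value.
* @expect_rsp: Non-zero for an ELS request, zero for an ELS response.
*
* Invokes the SLI3 or SLI4 prep routine through the jump table pointer
* set up in lpfc_sli_api_table_setup.
**/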
  9798. void
  9799. lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
  9800. struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
  9801. u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
  9802. u8 expect_rsp)
  9803. {
  9804. phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
  9805. elscmd, tmo, expect_rsp);
  9806. }
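/**
* __lpfc_sli_prep_gen_req_s3 - SLI3 prep for a GEN_REQUEST64 (CT pass-through)
* @cmdiocbq: Pointer to the command iocb being prepared.
* @bmp: Pointer to the DMA buffer holding the buffer pointer list.
* @rpi: RPI of the remote port, placed in ulpContext.
* @num_entry: Number of BDEs in the buffer pointer list.
* @tmo: Command timeout value.
*
* Builds a CMD_GEN_REQUEST64_CR iocb carrying unsolicited CT traffic.
**/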
  9807. static void
  9808. __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
  9809. u16 rpi, u32 num_entry, u8 tmo)
  9810. {
  9811. IOCB_t *cmd;
  9812. cmd = &cmdiocbq->iocb;
  9813. memset(cmd, 0, sizeof(*cmd));
  9814. cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  9815. cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
  9816. cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  9817. cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
  9818. cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
  9819. cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
  9820. cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
  9821. cmd->ulpContext = rpi;
  9822. cmd->ulpClass = CLASS3;
  9823. cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
  9824. cmd->ulpBdeCount = 1;
  9825. cmd->ulpLe = 1;
  9826. cmd->ulpOwner = OWN_CHIP;
  9827. cmd->ulpTimeout = tmo;
  9828. }
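/**
* __lpfc_sli_prep_gen_req_s4 - SLI4 prep for a GEN_REQUEST64 WQE
* @cmdiocbq: Pointer to the command iocb being prepared.
* @bmp: Pointer to the DMA buffer holding the buffer pointer list.
* @rpi: RPI of the remote port, placed in the context tag.
* @num_entry: Number of BDEs in the buffer pointer list.
* @tmo: Command timeout value.
*
* Walks the BPL to compute the transmit length (leading BDE_64 entries)
* and the total length, then builds the CMD_GEN_REQUEST64_CR WQE with
* the request and maximum response payload lengths.
**/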
  9829. static void
  9830. __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
  9831. u16 rpi, u32 num_entry, u8 tmo)
  9832. {
  9833. union lpfc_wqe128 *cmdwqe;
  9834. struct ulp_bde64_le *bde, *bpl;
  9835. u32 xmit_len = 0, total_len = 0, size, type, i;
  9836. cmdwqe = &cmdiocbq->wqe;
  9837. memset(cmdwqe, 0, sizeof(*cmdwqe));
  9838. /* Calculate total_len and xmit_len */
  9839. bpl = (struct ulp_bde64_le *)bmp->virt;
  9840. for (i = 0; i < num_entry; i++) {
  9841. size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
  9842. total_len += size;
  9843. }
  9844. for (i = 0; i < num_entry; i++) {
  9845. size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
  9846. type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
  9847. if (type != ULP_BDE64_TYPE_BDE_64)
  9848. break;
  9849. xmit_len += size;
  9850. }
  9851. /* Words 0 - 2 */
  9852. bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
  9853. bde->addr_low = bpl->addr_low;
  9854. bde->addr_high = bpl->addr_high;
  9855. bde->type_size = cpu_to_le32(xmit_len);
  9856. bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
  9857. /* Word 3 */
  9858. cmdwqe->gen_req.request_payload_len = xmit_len;
  9859. /* Word 5 */
  9860. bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
  9861. bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
  9862. bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
  9863. bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
  9864. /* Word 6 */
  9865. bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
  9866. /* Word 7 */
  9867. bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
  9868. bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
  9869. bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
  9870. bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
  9871. /* Word 12 */
  9872. cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
  9873. }
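/**
* lpfc_sli_prep_gen_req - Wrapper for preparing a GEN_REQUEST64 command
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to the command iocb being prepared.
* @bmp: Pointer to the DMA buffer holding the buffer pointer list.
* @rpi: RPI of the remote port.
* @num_entry: Number of BDEs in the buffer pointer list.
* @tmo: Command timeout value.
*
* Invokes the SLI3 or SLI4 prep routine through the jump table pointer.
**/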
  9874. void
  9875. lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
  9876. struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
  9877. {
  9878. phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
  9879. }
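/**
* __lpfc_sli_prep_xmit_seq64_s3 - SLI3 prep for an XMIT_SEQUENCE64 command
* @cmdiocbq: Pointer to the command iocb being prepared.
* @bmp: Pointer to the DMA buffer holding the buffer pointer list.
* @rpi: RPI used as ulpContext for CMD_XMIT_SEQUENCE64_CR.
* @ox_id: OX_ID used as ulpContext for CMD_XMIT_SEQUENCE64_CX.
* @num_entry: Number of BDEs in the buffer pointer list.
* @rctl: R_CTL value for the sequence.
* @last_seq: Non-zero if this is the last sequence of the exchange.
* @cr_cx_cmd: CMD_XMIT_SEQUENCE64_CR or CMD_XMIT_SEQUENCE64_CX.
**/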
  9880. static void
  9881. __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
  9882. struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
  9883. u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
  9884. {
  9885. IOCB_t *icmd;
  9886. icmd = &cmdiocbq->iocb;
  9887. memset(icmd, 0, sizeof(*icmd));
  9888. icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  9889. icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
  9890. icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  9891. icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
  9892. icmd->un.xseq64.w5.hcsw.Fctl = LA;
  9893. if (last_seq)
  9894. icmd->un.xseq64.w5.hcsw.Fctl |= LS;
  9895. icmd->un.xseq64.w5.hcsw.Dfctl = 0;
  9896. icmd->un.xseq64.w5.hcsw.Rctl = rctl;
  9897. icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
  9898. icmd->ulpBdeCount = 1;
  9899. icmd->ulpLe = 1;
  9900. icmd->ulpClass = CLASS3;
  9901. switch (cr_cx_cmd) {
  9902. case CMD_XMIT_SEQUENCE64_CR:
  9903. icmd->ulpContext = rpi;
  9904. icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
  9905. break;
  9906. case CMD_XMIT_SEQUENCE64_CX:
  9907. icmd->ulpContext = ox_id;
  9908. icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
  9909. break;
  9910. default:
  9911. break;
  9912. }
  9913. }
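/**
* __lpfc_sli_prep_xmit_seq64_s4 - SLI4 prep for an XMIT_SEQUENCE64 WQE
* @cmdiocbq: Pointer to the command iocb being prepared.
* @bmp: Pointer to the DMA buffer holding the buffer pointer list.
* @rpi: RPI used as the WQE context tag.
* @ox_id: OX_ID of the exchange being continued.
* @full_size: Full transmit length for LIBDFC/loopback requests.
* @rctl: R_CTL value for the sequence.
* @last_seq: Non-zero if this is the last sequence of the exchange.
* @cr_cx_cmd: CR/CX command variant (not used by the SLI4 version).
*
* Builds the CMD_XMIT_SEQUENCE64_WQE from the first BDE in the BPL; for
* LIBDFC or loopback iocbs the transmit length is @full_size and a
* loopback application id may be set, otherwise the BDE size is used.
**/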
  9914. static void
  9915. __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
  9916. struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
  9917. u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
  9918. {
  9919. union lpfc_wqe128 *wqe;
  9920. struct ulp_bde64 *bpl;
  9921. wqe = &cmdiocbq->wqe;
  9922. memset(wqe, 0, sizeof(*wqe));
  9923. /* Words 0 - 2 */
  9924. bpl = (struct ulp_bde64 *)bmp->virt;
  9925. wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
  9926. wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
  9927. wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
  9928. /* Word 5 */
  9929. bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
  9930. bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
  9931. bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
  9932. bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
  9933. bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
  9934. /* Word 6 */
  9935. bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
  9936. bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
  9937. CMD_XMIT_SEQUENCE64_WQE);
  9938. /* Word 7 */
  9939. bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
  9940. /* Word 9 */
  9941. bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
  9942. if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
  9943. /* Word 10 */
  9944. if (cmdiocbq->cmd_flag & LPFC_IO_VMID) {
  9945. bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1);
  9946. bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1);
  9947. wqe->words[31] = LOOPBACK_SRC_APPID;
  9948. }
  9949. /* Word 12 */
  9950. wqe->xmit_sequence.xmit_len = full_size;
  9951. }
  9952. else
  9953. wqe->xmit_sequence.xmit_len =
  9954. wqe->xmit_sequence.bde.tus.f.bdeSize;
  9955. }
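/**
* lpfc_sli_prep_xmit_seq64 - Wrapper for preparing an XMIT_SEQUENCE64 command
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to the command iocb being prepared.
* @bmp: Pointer to the DMA buffer holding the buffer pointer list.
* @rpi: RPI of the remote port.
* @ox_id: OX_ID of the exchange.
* @num_entry: Number of BDEs in the buffer pointer list.
* @rctl: R_CTL value for the sequence.
* @last_seq: Non-zero if this is the last sequence of the exchange.
* @cr_cx_cmd: CR/CX command variant.
*
* Invokes the SLI3 or SLI4 prep routine through the jump table pointer.
**/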
  9956. void
  9957. lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
  9958. struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
  9959. u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
  9960. {
  9961. phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
  9962. rctl, last_seq, cr_cx_cmd);
  9963. }
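/**
* __lpfc_sli_prep_abort_xri_s3 - SLI3 prep for an abort iocb
* @cmdiocbq: Pointer to the abort iocb being prepared.
* @ulp_context: Context tag of the exchange to abort.
* @iotag: IO tag of the command to abort.
* @ulp_class: Class of service of the original command.
* @cqid: Completion queue id (not used by the SLI3 version).
* @ia: True to issue CLOSE_XRI_CN, false to issue ABORT_XRI_CN (ABTS).
* @wqec: WQE completion request flag (not used by the SLI3 version).
**/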
  9964. static void
  9965. __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
  9966. u16 iotag, u8 ulp_class, u16 cqid, bool ia,
  9967. bool wqec)
  9968. {
  9969. IOCB_t *icmd = NULL;
  9970. icmd = &cmdiocbq->iocb;
  9971. memset(icmd, 0, sizeof(*icmd));
  9972. /* Word 5 */
  9973. icmd->un.acxri.abortContextTag = ulp_context;
  9974. icmd->un.acxri.abortIoTag = iotag;
  9975. if (ia) {
  9976. /* Word 7 */
  9977. icmd->ulpCommand = CMD_CLOSE_XRI_CN;
  9978. } else {
  9979. /* Word 3 */
  9980. icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
  9981. /* Word 7 */
  9982. icmd->ulpClass = ulp_class;
  9983. icmd->ulpCommand = CMD_ABORT_XRI_CN;
  9984. }
  9985. /* Word 7 */
  9986. icmd->ulpLe = 1;
  9987. }
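/**
* __lpfc_sli_prep_abort_xri_s4 - SLI4 prep for an ABORT_XRI_WQE
* @cmdiocbq: Pointer to the abort iocb being prepared.
* @ulp_context: Abort tag identifying the exchange to abort.
* @iotag: Request tag for the abort WQE.
* @ulp_class: Class of service of the original command (not used for SLI4).
* @cqid: Completion queue id the abort completion is directed to.
* @ia: True to close the exchange without sending an ABTS on the wire.
* @wqec: True to request a WQE completion for the abort.
**/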
  9988. static void
  9989. __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
  9990. u16 iotag, u8 ulp_class, u16 cqid, bool ia,
  9991. bool wqec)
  9992. {
  9993. union lpfc_wqe128 *wqe;
  9994. wqe = &cmdiocbq->wqe;
  9995. memset(wqe, 0, sizeof(*wqe));
  9996. /* Word 3 */
  9997. bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
  9998. if (ia)
  9999. bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
  10000. else
  10001. bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
  10002. /* Word 7 */
  10003. bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
  10004. /* Word 8 */
  10005. wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
  10006. /* Word 9 */
  10007. bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
  10008. /* Word 10 */
  10009. bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
  10010. /* Word 11 */
  10011. if (wqec)
  10012. bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
  10013. bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
  10014. bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
  10015. }
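/**
* lpfc_sli_prep_abort_xri - Wrapper for preparing an abort command
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to the abort iocb being prepared.
* @ulp_context: Context tag of the exchange to abort.
* @iotag: IO tag of the command to abort.
* @ulp_class: Class of service of the original command.
* @cqid: Completion queue id for the abort completion.
* @ia: True to close the exchange without sending an ABTS.
* @wqec: True to request a WQE completion for the abort.
*
* Invokes the SLI3 or SLI4 prep routine through the jump table pointer.
**/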
  10016. void
  10017. lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
  10018. u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
  10019. bool ia, bool wqec)
  10020. {
  10021. phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
  10022. cqid, ia, wqec);
  10023. }
  10024. /**
  10025. * lpfc_sli_api_table_setup - Set up sli api function jump table
  10026. * @phba: The hba struct for which this call is being executed.
  10027. * @dev_grp: The HBA PCI-Device group number.
  10028. *
  10029. * This routine sets up the SLI interface API function jump table in @phba
  10030. * struct.
  10031. * Returns: 0 - success, -ENODEV - failure.
  10032. **/
  10033. int
  10034. lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  10035. {
  10036. switch (dev_grp) {
  10037. case LPFC_PCI_DEV_LP:
  10038. phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
  10039. phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
  10040. phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
  10041. phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
  10042. phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
  10043. phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
  10044. phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
  10045. break;
  10046. case LPFC_PCI_DEV_OC:
  10047. phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
  10048. phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
  10049. phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
  10050. phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
  10051. phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
  10052. phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
  10053. phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
  10054. break;
  10055. default:
  10056. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10057. "1419 Invalid HBA PCI-device group: 0x%x\n",
  10058. dev_grp);
  10059. return -ENODEV;
  10060. }
  10061. return 0;
  10062. }
  10063. /**
  10064. * lpfc_sli4_calc_ring - Calculates which ring to use
  10065. * @phba: Pointer to HBA context object.
  10066. * @piocb: Pointer to command iocb.
  10067. *
* For SLI4 only, FCP IO can be deferred to one of many WQs, based on
* hba_wqidx, so we need to calculate the corresponding ring.
* Since ABORTs must go on the same WQ as the command they are
* aborting, we use the command's hba_wqidx.
  10072. */
  10073. struct lpfc_sli_ring *
  10074. lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
  10075. {
  10076. struct lpfc_io_buf *lpfc_cmd;
  10077. if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
  10078. if (unlikely(!phba->sli4_hba.hdwq))
  10079. return NULL;
  10080. /*
* For an abort iocb, hba_wqidx should already
* be set up based on what work queue we used.
  10083. */
  10084. if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
  10085. lpfc_cmd = piocb->io_buf;
  10086. piocb->hba_wqidx = lpfc_cmd->hdwq_no;
  10087. }
  10088. return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
  10089. } else {
  10090. if (unlikely(!phba->sli4_hba.els_wq))
  10091. return NULL;
  10092. piocb->hba_wqidx = 0;
  10093. return phba->sli4_hba.els_wq->pring;
  10094. }
  10095. }
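/**
* lpfc_sli4_poll_eq - Poll an EQ from the IO submission path
* @eq: Pointer to the event queue to poll.
*
* If the EQ is in LPFC_EQ_POLL mode, process any pending entries on it
* without re-arming, so completions are picked up even while interrupts
* are not being used for this EQ.
**/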
  10096. inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
  10097. {
  10098. struct lpfc_hba *phba = eq->phba;
  10099. /*
* Unlocking an irq is one of the entry points to check for
* re-schedule, but we are fine on the io submission path
* because the midlayer does a get_cpu to glue us in. Flush
* out the invalidate queue so we can see the updated value
* of the mode flag.
  10105. */
  10106. smp_rmb();
  10107. if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
/* We will likely not get the completion for the caller
* during this iteration, but that is fine.
* Future io's coming in on this eq should be able to
* pick it up. Single io's will be handled through a
* reschedule from the polling timer function, which is
* currently triggered every 1 msec.
  10114. */
  10115. lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
  10116. LPFC_QUEUE_WORK);
  10117. }
  10118. /**
  10119. * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
  10120. * @phba: Pointer to HBA context object.
  10121. * @ring_number: Ring number
  10122. * @piocb: Pointer to command iocb.
  10123. * @flag: Flag indicating if this command can be put into txq.
  10124. *
  10125. * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
  10126. * function. This function gets the hbalock and calls
  10127. * __lpfc_sli_issue_iocb function and will return the error returned
  10128. * by __lpfc_sli_issue_iocb function. This wrapper is used by
  10129. * functions which do not hold hbalock.
  10130. **/
  10131. int
  10132. lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
  10133. struct lpfc_iocbq *piocb, uint32_t flag)
  10134. {
  10135. struct lpfc_sli_ring *pring;
  10136. struct lpfc_queue *eq;
  10137. unsigned long iflags;
  10138. int rc;
  10139. /* If the PCI channel is in offline state, do not post iocbs. */
  10140. if (unlikely(pci_channel_offline(phba->pcidev)))
  10141. return IOCB_ERROR;
  10142. if (phba->sli_rev == LPFC_SLI_REV4) {
  10143. lpfc_sli_prep_wqe(phba, piocb);
  10144. eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
  10145. pring = lpfc_sli4_calc_ring(phba, piocb);
  10146. if (unlikely(pring == NULL))
  10147. return IOCB_ERROR;
  10148. spin_lock_irqsave(&pring->ring_lock, iflags);
  10149. rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
  10150. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  10151. lpfc_sli4_poll_eq(eq);
  10152. } else {
  10153. /* For now, SLI2/3 will still use hbalock */
  10154. spin_lock_irqsave(&phba->hbalock, iflags);
  10155. rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
  10156. spin_unlock_irqrestore(&phba->hbalock, iflags);
  10157. }
  10158. return rc;
  10159. }
  10160. /**
  10161. * lpfc_extra_ring_setup - Extra ring setup function
  10162. * @phba: Pointer to HBA context object.
  10163. *
* This function is called while the driver attaches to the
* HBA to set up the extra ring. The extra ring is used
* only when the driver needs to support target mode
* or IP over FC functionality.
  10168. *
  10169. * This function is called with no lock held. SLI3 only.
  10170. **/
  10171. static int
  10172. lpfc_extra_ring_setup( struct lpfc_hba *phba)
  10173. {
  10174. struct lpfc_sli *psli;
  10175. struct lpfc_sli_ring *pring;
  10176. psli = &phba->sli;
  10177. /* Adjust cmd/rsp ring iocb entries more evenly */
  10178. /* Take some away from the FCP ring */
  10179. pring = &psli->sli3_ring[LPFC_FCP_RING];
  10180. pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
  10181. pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
  10182. pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
  10183. pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
  10184. /* and give them to the extra ring */
  10185. pring = &psli->sli3_ring[LPFC_EXTRA_RING];
  10186. pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
  10187. pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
  10188. pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
  10189. pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
  10190. /* Setup default profile for this ring */
  10191. pring->iotag_max = 4096;
  10192. pring->num_mask = 1;
  10193. pring->prt[0].profile = 0; /* Mask 0 */
  10194. pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
  10195. pring->prt[0].type = phba->cfg_multi_ring_type;
  10196. pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
  10197. return 0;
  10198. }
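/**
* lpfc_sli_post_recovery_event - Queue a port recovery event for a node
* @phba: Pointer to HBA context object.
* @ndlp: Pointer to the node that needs recovery.
*
* Takes a node reference and, if no recovery event is already queued for
* the node, adds an LPFC_EVT_RECOVER_PORT event to the driver work list
* and wakes up the worker thread.
**/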
  10199. static void
  10200. lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
  10201. struct lpfc_nodelist *ndlp)
  10202. {
  10203. unsigned long iflags;
  10204. struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
  10205. /* Hold a node reference for outstanding queued work */
  10206. if (!lpfc_nlp_get(ndlp))
  10207. return;
  10208. spin_lock_irqsave(&phba->hbalock, iflags);
  10209. if (!list_empty(&evtp->evt_listp)) {
  10210. spin_unlock_irqrestore(&phba->hbalock, iflags);
  10211. lpfc_nlp_put(ndlp);
  10212. return;
  10213. }
  10214. evtp->evt_arg1 = ndlp;
  10215. evtp->evt = LPFC_EVT_RECOVER_PORT;
  10216. list_add_tail(&evtp->evt_listp, &phba->work_list);
  10217. spin_unlock_irqrestore(&phba->hbalock, iflags);
  10218. lpfc_worker_wake_up(phba);
  10219. }
  10220. /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
  10221. * @phba: Pointer to HBA context object.
  10222. * @iocbq: Pointer to iocb object.
  10223. *
  10224. * The async_event handler calls this routine when it receives
  10225. * an ASYNC_STATUS_CN event from the port. The port generates
  10226. * this event when an Abort Sequence request to an rport fails
  10227. * twice in succession. The abort could be originated by the
  10228. * driver or by the port. The ABTS could have been for an ELS
  10229. * or FCP IO. The port only generates this event when an ABTS
  10230. * fails to complete after one retry.
  10231. */
  10232. static void
  10233. lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
  10234. struct lpfc_iocbq *iocbq)
  10235. {
  10236. struct lpfc_nodelist *ndlp = NULL;
  10237. uint16_t rpi = 0, vpi = 0;
  10238. struct lpfc_vport *vport = NULL;
  10239. /* The rpi in the ulpContext is vport-sensitive. */
  10240. vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
  10241. rpi = iocbq->iocb.ulpContext;
  10242. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  10243. "3092 Port generated ABTS async event "
  10244. "on vpi %d rpi %d status 0x%x\n",
  10245. vpi, rpi, iocbq->iocb.ulpStatus);
  10246. vport = lpfc_find_vport_by_vpid(phba, vpi);
  10247. if (!vport)
  10248. goto err_exit;
  10249. ndlp = lpfc_findnode_rpi(vport, rpi);
  10250. if (!ndlp)
  10251. goto err_exit;
  10252. if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
  10253. lpfc_sli_abts_recover_port(vport, ndlp);
  10254. return;
  10255. err_exit:
  10256. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  10257. "3095 Event Context not found, no "
  10258. "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
  10259. vpi, rpi, iocbq->iocb.ulpStatus,
  10260. iocbq->iocb.ulpContext);
  10261. }
  10262. /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
  10263. * @phba: pointer to HBA context object.
  10264. * @ndlp: nodelist pointer for the impacted rport.
  10265. * @axri: pointer to the wcqe containing the failed exchange.
  10266. *
  10267. * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
  10268. * port. The port generates this event when an abort exchange request to an
  10269. * rport fails twice in succession with no reply. The abort could be originated
  10270. * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
  10271. */
  10272. void
  10273. lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
  10274. struct lpfc_nodelist *ndlp,
  10275. struct sli4_wcqe_xri_aborted *axri)
  10276. {
  10277. uint32_t ext_status = 0;
  10278. if (!ndlp) {
  10279. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  10280. "3115 Node Context not found, driver "
  10281. "ignoring abts err event\n");
  10282. return;
  10283. }
  10284. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  10285. "3116 Port generated FCP XRI ABORT event on "
  10286. "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
  10287. ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
  10288. bf_get(lpfc_wcqe_xa_xri, axri),
  10289. bf_get(lpfc_wcqe_xa_status, axri),
  10290. axri->parameter);
  10291. /*
  10292. * Catch the ABTS protocol failure case. Older OCe FW releases returned
  10293. * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
  10294. * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
  10295. */
  10296. ext_status = axri->parameter & IOERR_PARAM_MASK;
  10297. if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
  10298. ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
  10299. lpfc_sli_post_recovery_event(phba, ndlp);
  10300. }
  10301. /**
  10302. * lpfc_sli_async_event_handler - ASYNC iocb handler function
  10303. * @phba: Pointer to HBA context object.
  10304. * @pring: Pointer to driver SLI ring object.
  10305. * @iocbq: Pointer to iocb object.
  10306. *
  10307. * This function is called by the slow ring event handler
  10308. * function when there is an ASYNC event iocb in the ring.
  10309. * This function is called with no lock held.
  10310. * Currently this function handles only temperature related
  10311. * ASYNC events. The function decodes the temperature sensor
  10312. * event message and posts events for the management applications.
  10313. **/
  10314. static void
  10315. lpfc_sli_async_event_handler(struct lpfc_hba * phba,
  10316. struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
  10317. {
  10318. IOCB_t *icmd;
  10319. uint16_t evt_code;
  10320. struct temp_event temp_event_data;
  10321. struct Scsi_Host *shost;
  10322. uint32_t *iocb_w;
  10323. icmd = &iocbq->iocb;
  10324. evt_code = icmd->un.asyncstat.evt_code;
  10325. switch (evt_code) {
  10326. case ASYNC_TEMP_WARN:
  10327. case ASYNC_TEMP_SAFE:
  10328. temp_event_data.data = (uint32_t) icmd->ulpContext;
  10329. temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
  10330. if (evt_code == ASYNC_TEMP_WARN) {
  10331. temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
  10332. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  10333. "0347 Adapter is very hot, please take "
  10334. "corrective action. temperature : %d Celsius\n",
  10335. (uint32_t) icmd->ulpContext);
  10336. } else {
  10337. temp_event_data.event_code = LPFC_NORMAL_TEMP;
  10338. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  10339. "0340 Adapter temperature is OK now. "
  10340. "temperature : %d Celsius\n",
  10341. (uint32_t) icmd->ulpContext);
  10342. }
  10343. /* Send temperature change event to applications */
  10344. shost = lpfc_shost_from_vport(phba->pport);
  10345. fc_host_post_vendor_event(shost, fc_get_event_number(),
  10346. sizeof(temp_event_data), (char *) &temp_event_data,
  10347. LPFC_NL_VENDOR_ID);
  10348. break;
  10349. case ASYNC_STATUS_CN:
  10350. lpfc_sli_abts_err_handler(phba, iocbq);
  10351. break;
  10352. default:
  10353. iocb_w = (uint32_t *) icmd;
  10354. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  10355. "0346 Ring %d handler: unexpected ASYNC_STATUS"
  10356. " evt_code 0x%x\n"
  10357. "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
  10358. "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
  10359. "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
  10360. "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
  10361. pring->ringno, icmd->un.asyncstat.evt_code,
  10362. iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
  10363. iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
  10364. iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
  10365. iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
  10366. break;
  10367. }
  10368. }
  10369. /**
  10370. * lpfc_sli4_setup - SLI ring setup function
  10371. * @phba: Pointer to HBA context object.
  10372. *
* lpfc_sli4_setup sets up the rings of the SLI interface with the
* number of iocbs per ring and iotags. This function is
* called while the driver attaches to the HBA and before the
* interrupts are enabled, so there is no need for locking.
  10377. *
  10378. * This function always returns 0.
  10379. **/
  10380. int
  10381. lpfc_sli4_setup(struct lpfc_hba *phba)
  10382. {
  10383. struct lpfc_sli_ring *pring;
  10384. pring = phba->sli4_hba.els_wq->pring;
  10385. pring->num_mask = LPFC_MAX_RING_MASK;
  10386. pring->prt[0].profile = 0; /* Mask 0 */
  10387. pring->prt[0].rctl = FC_RCTL_ELS_REQ;
  10388. pring->prt[0].type = FC_TYPE_ELS;
  10389. pring->prt[0].lpfc_sli_rcv_unsol_event =
  10390. lpfc_els_unsol_event;
  10391. pring->prt[1].profile = 0; /* Mask 1 */
  10392. pring->prt[1].rctl = FC_RCTL_ELS_REP;
  10393. pring->prt[1].type = FC_TYPE_ELS;
  10394. pring->prt[1].lpfc_sli_rcv_unsol_event =
  10395. lpfc_els_unsol_event;
  10396. pring->prt[2].profile = 0; /* Mask 2 */
  10397. /* NameServer Inquiry */
  10398. pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
  10399. /* NameServer */
  10400. pring->prt[2].type = FC_TYPE_CT;
  10401. pring->prt[2].lpfc_sli_rcv_unsol_event =
  10402. lpfc_ct_unsol_event;
  10403. pring->prt[3].profile = 0; /* Mask 3 */
  10404. /* NameServer response */
  10405. pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
  10406. /* NameServer */
  10407. pring->prt[3].type = FC_TYPE_CT;
  10408. pring->prt[3].lpfc_sli_rcv_unsol_event =
  10409. lpfc_ct_unsol_event;
  10410. return 0;
  10411. }
  10412. /**
  10413. * lpfc_sli_setup - SLI ring setup function
  10414. * @phba: Pointer to HBA context object.
  10415. *
10416. * lpfc_sli_setup sets up the rings of the SLI interface with the
10417. * number of iocbs per ring and the iotag ranges. This function is
10418. * called while the driver attaches to the HBA and before the
10419. * interrupts are enabled, so there is no need for locking.
  10420. *
  10421. * This function always returns 0. SLI3 only.
  10422. **/
  10423. int
  10424. lpfc_sli_setup(struct lpfc_hba *phba)
  10425. {
  10426. int i, totiocbsize = 0;
  10427. struct lpfc_sli *psli = &phba->sli;
  10428. struct lpfc_sli_ring *pring;
  10429. psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
  10430. psli->sli_flag = 0;
  10431. psli->iocbq_lookup = NULL;
  10432. psli->iocbq_lookup_len = 0;
  10433. psli->last_iotag = 0;
  10434. for (i = 0; i < psli->num_rings; i++) {
  10435. pring = &psli->sli3_ring[i];
  10436. switch (i) {
  10437. case LPFC_FCP_RING: /* ring 0 - FCP */
  10438. /* numCiocb and numRiocb are used in config_port */
  10439. pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
  10440. pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
  10441. pring->sli.sli3.numCiocb +=
  10442. SLI2_IOCB_CMD_R1XTRA_ENTRIES;
  10443. pring->sli.sli3.numRiocb +=
  10444. SLI2_IOCB_RSP_R1XTRA_ENTRIES;
  10445. pring->sli.sli3.numCiocb +=
  10446. SLI2_IOCB_CMD_R3XTRA_ENTRIES;
  10447. pring->sli.sli3.numRiocb +=
  10448. SLI2_IOCB_RSP_R3XTRA_ENTRIES;
  10449. pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
  10450. SLI3_IOCB_CMD_SIZE :
  10451. SLI2_IOCB_CMD_SIZE;
  10452. pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
  10453. SLI3_IOCB_RSP_SIZE :
  10454. SLI2_IOCB_RSP_SIZE;
  10455. pring->iotag_ctr = 0;
  10456. pring->iotag_max =
  10457. (phba->cfg_hba_queue_depth * 2);
  10458. pring->fast_iotag = pring->iotag_max;
  10459. pring->num_mask = 0;
  10460. break;
  10461. case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
  10462. /* numCiocb and numRiocb are used in config_port */
  10463. pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
  10464. pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
  10465. pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
  10466. SLI3_IOCB_CMD_SIZE :
  10467. SLI2_IOCB_CMD_SIZE;
  10468. pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
  10469. SLI3_IOCB_RSP_SIZE :
  10470. SLI2_IOCB_RSP_SIZE;
  10471. pring->iotag_max = phba->cfg_hba_queue_depth;
  10472. pring->num_mask = 0;
  10473. break;
  10474. case LPFC_ELS_RING: /* ring 2 - ELS / CT */
  10475. /* numCiocb and numRiocb are used in config_port */
  10476. pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
  10477. pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
  10478. pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
  10479. SLI3_IOCB_CMD_SIZE :
  10480. SLI2_IOCB_CMD_SIZE;
  10481. pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
  10482. SLI3_IOCB_RSP_SIZE :
  10483. SLI2_IOCB_RSP_SIZE;
  10484. pring->fast_iotag = 0;
  10485. pring->iotag_ctr = 0;
  10486. pring->iotag_max = 4096;
  10487. pring->lpfc_sli_rcv_async_status =
  10488. lpfc_sli_async_event_handler;
  10489. pring->num_mask = LPFC_MAX_RING_MASK;
  10490. pring->prt[0].profile = 0; /* Mask 0 */
  10491. pring->prt[0].rctl = FC_RCTL_ELS_REQ;
  10492. pring->prt[0].type = FC_TYPE_ELS;
  10493. pring->prt[0].lpfc_sli_rcv_unsol_event =
  10494. lpfc_els_unsol_event;
  10495. pring->prt[1].profile = 0; /* Mask 1 */
  10496. pring->prt[1].rctl = FC_RCTL_ELS_REP;
  10497. pring->prt[1].type = FC_TYPE_ELS;
  10498. pring->prt[1].lpfc_sli_rcv_unsol_event =
  10499. lpfc_els_unsol_event;
  10500. pring->prt[2].profile = 0; /* Mask 2 */
  10501. /* NameServer Inquiry */
  10502. pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
  10503. /* NameServer */
  10504. pring->prt[2].type = FC_TYPE_CT;
  10505. pring->prt[2].lpfc_sli_rcv_unsol_event =
  10506. lpfc_ct_unsol_event;
  10507. pring->prt[3].profile = 0; /* Mask 3 */
  10508. /* NameServer response */
  10509. pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
  10510. /* NameServer */
  10511. pring->prt[3].type = FC_TYPE_CT;
  10512. pring->prt[3].lpfc_sli_rcv_unsol_event =
  10513. lpfc_ct_unsol_event;
  10514. break;
  10515. }
  10516. totiocbsize += (pring->sli.sli3.numCiocb *
  10517. pring->sli.sli3.sizeCiocb) +
  10518. (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
  10519. }
  10520. if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
  10521. /* Too many cmd / rsp ring entries in SLI2 SLIM */
  10522. printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
  10523. "SLI2 SLIM Data: x%x x%lx\n",
  10524. phba->brd_no, totiocbsize,
  10525. (unsigned long) MAX_SLIM_IOCB_SIZE);
  10526. }
  10527. if (phba->cfg_multi_ring_support == 2)
  10528. lpfc_extra_ring_setup(phba);
  10529. return 0;
  10530. }
  10531. /**
  10532. * lpfc_sli4_queue_init - Queue initialization function
  10533. * @phba: Pointer to HBA context object.
  10534. *
  10535. * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
  10536. * ring. This function also initializes ring indices of each ring.
  10537. * This function is called during the initialization of the SLI
  10538. * interface of an HBA.
10539. * This function is called with no lock held and does not return
10540. * a value.
  10541. **/
  10542. void
  10543. lpfc_sli4_queue_init(struct lpfc_hba *phba)
  10544. {
  10545. struct lpfc_sli *psli;
  10546. struct lpfc_sli_ring *pring;
  10547. int i;
  10548. psli = &phba->sli;
  10549. spin_lock_irq(&phba->hbalock);
  10550. INIT_LIST_HEAD(&psli->mboxq);
  10551. INIT_LIST_HEAD(&psli->mboxq_cmpl);
  10552. /* Initialize list headers for txq and txcmplq as double linked lists */
  10553. for (i = 0; i < phba->cfg_hdw_queue; i++) {
  10554. pring = phba->sli4_hba.hdwq[i].io_wq->pring;
  10555. pring->flag = 0;
  10556. pring->ringno = LPFC_FCP_RING;
  10557. pring->txcmplq_cnt = 0;
  10558. INIT_LIST_HEAD(&pring->txq);
  10559. INIT_LIST_HEAD(&pring->txcmplq);
  10560. INIT_LIST_HEAD(&pring->iocb_continueq);
  10561. spin_lock_init(&pring->ring_lock);
  10562. }
  10563. pring = phba->sli4_hba.els_wq->pring;
  10564. pring->flag = 0;
  10565. pring->ringno = LPFC_ELS_RING;
  10566. pring->txcmplq_cnt = 0;
  10567. INIT_LIST_HEAD(&pring->txq);
  10568. INIT_LIST_HEAD(&pring->txcmplq);
  10569. INIT_LIST_HEAD(&pring->iocb_continueq);
  10570. spin_lock_init(&pring->ring_lock);
  10571. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  10572. pring = phba->sli4_hba.nvmels_wq->pring;
  10573. pring->flag = 0;
  10574. pring->ringno = LPFC_ELS_RING;
  10575. pring->txcmplq_cnt = 0;
  10576. INIT_LIST_HEAD(&pring->txq);
  10577. INIT_LIST_HEAD(&pring->txcmplq);
  10578. INIT_LIST_HEAD(&pring->iocb_continueq);
  10579. spin_lock_init(&pring->ring_lock);
  10580. }
  10581. spin_unlock_irq(&phba->hbalock);
  10582. }
  10583. /**
  10584. * lpfc_sli_queue_init - Queue initialization function
  10585. * @phba: Pointer to HBA context object.
  10586. *
  10587. * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
  10588. * ring. This function also initializes ring indices of each ring.
  10589. * This function is called during the initialization of the SLI
  10590. * interface of an HBA.
10591. * This function is called with no lock held and does not return
10592. * a value.
  10593. **/
  10594. void
  10595. lpfc_sli_queue_init(struct lpfc_hba *phba)
  10596. {
  10597. struct lpfc_sli *psli;
  10598. struct lpfc_sli_ring *pring;
  10599. int i;
  10600. psli = &phba->sli;
  10601. spin_lock_irq(&phba->hbalock);
  10602. INIT_LIST_HEAD(&psli->mboxq);
  10603. INIT_LIST_HEAD(&psli->mboxq_cmpl);
  10604. /* Initialize list headers for txq and txcmplq as double linked lists */
  10605. for (i = 0; i < psli->num_rings; i++) {
  10606. pring = &psli->sli3_ring[i];
  10607. pring->ringno = i;
  10608. pring->sli.sli3.next_cmdidx = 0;
  10609. pring->sli.sli3.local_getidx = 0;
  10610. pring->sli.sli3.cmdidx = 0;
  10611. INIT_LIST_HEAD(&pring->iocb_continueq);
  10612. INIT_LIST_HEAD(&pring->iocb_continue_saveq);
  10613. INIT_LIST_HEAD(&pring->postbufq);
  10614. pring->flag = 0;
  10615. INIT_LIST_HEAD(&pring->txq);
  10616. INIT_LIST_HEAD(&pring->txcmplq);
  10617. spin_lock_init(&pring->ring_lock);
  10618. }
  10619. spin_unlock_irq(&phba->hbalock);
  10620. }
  10621. /**
  10622. * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
  10623. * @phba: Pointer to HBA context object.
  10624. *
  10625. * This routine flushes the mailbox command subsystem. It will unconditionally
  10626. * flush all the mailbox commands in the three possible stages in the mailbox
  10627. * command sub-system: pending mailbox command queue; the outstanding mailbox
  10628. * command; and completed mailbox command queue. It is caller's responsibility
  10629. * to make sure that the driver is in the proper state to flush the mailbox
  10630. * command sub-system. Namely, the posting of mailbox commands into the
  10631. * pending mailbox command queue from the various clients must be stopped;
10632. * either the HBA is in a state where it will never work on the outstanding
  10633. * mailbox command (such as in EEH or ERATT conditions) or the outstanding
  10634. * mailbox command has been completed.
  10635. **/
  10636. static void
  10637. lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
  10638. {
  10639. LIST_HEAD(completions);
  10640. struct lpfc_sli *psli = &phba->sli;
  10641. LPFC_MBOXQ_t *pmb;
  10642. unsigned long iflag;
  10643. /* Disable softirqs, including timers from obtaining phba->hbalock */
  10644. local_bh_disable();
  10645. /* Flush all the mailbox commands in the mbox system */
  10646. spin_lock_irqsave(&phba->hbalock, iflag);
  10647. /* The pending mailbox command queue */
  10648. list_splice_init(&phba->sli.mboxq, &completions);
  10649. /* The outstanding active mailbox command */
  10650. if (psli->mbox_active) {
  10651. list_add_tail(&psli->mbox_active->list, &completions);
  10652. psli->mbox_active = NULL;
  10653. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  10654. }
  10655. /* The completed mailbox command queue */
  10656. list_splice_init(&phba->sli.mboxq_cmpl, &completions);
  10657. spin_unlock_irqrestore(&phba->hbalock, iflag);
  10658. /* Enable softirqs again, done with phba->hbalock */
  10659. local_bh_enable();
  10660. /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
  10661. while (!list_empty(&completions)) {
  10662. list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
  10663. pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
  10664. if (pmb->mbox_cmpl)
  10665. pmb->mbox_cmpl(phba, pmb);
  10666. }
  10667. }
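/*
 * Illustrative sketch only, not part of the driver: a mailbox completion
 * handler can tell a flushed command apart from one the HBA actually
 * executed by checking for the MBX_NOT_FINISHED status that
 * lpfc_sli_mbox_sys_flush() stamps on every command it returns.  The
 * handler name below and the mempool_free()/mbox_mem_pool cleanup are
 * assumptions made for the example.
 */
static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
		/* Command was flushed, never executed; just release it */
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	/* Normal completion handling would go here */
	mempool_free(pmb, phba->mbox_mem_pool);
}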
  10668. /**
  10669. * lpfc_sli_host_down - Vport cleanup function
  10670. * @vport: Pointer to virtual port object.
  10671. *
  10672. * lpfc_sli_host_down is called to clean up the resources
  10673. * associated with a vport before destroying virtual
  10674. * port data structures.
10675. * This function does the following operations:
  10676. * - Free discovery resources associated with this virtual
  10677. * port.
  10678. * - Free iocbs associated with this virtual port in
  10679. * the txq.
  10680. * - Send abort for all iocb commands associated with this
  10681. * vport in txcmplq.
  10682. *
  10683. * This function is called with no lock held and always returns 1.
  10684. **/
  10685. int
  10686. lpfc_sli_host_down(struct lpfc_vport *vport)
  10687. {
  10688. LIST_HEAD(completions);
  10689. struct lpfc_hba *phba = vport->phba;
  10690. struct lpfc_sli *psli = &phba->sli;
  10691. struct lpfc_queue *qp = NULL;
  10692. struct lpfc_sli_ring *pring;
  10693. struct lpfc_iocbq *iocb, *next_iocb;
  10694. int i;
  10695. unsigned long flags = 0;
  10696. uint16_t prev_pring_flag;
  10697. lpfc_cleanup_discovery_resources(vport);
  10698. spin_lock_irqsave(&phba->hbalock, flags);
  10699. /*
  10700. * Error everything on the txq since these iocbs
  10701. * have not been given to the FW yet.
  10702. * Also issue ABTS for everything on the txcmplq
  10703. */
  10704. if (phba->sli_rev != LPFC_SLI_REV4) {
  10705. for (i = 0; i < psli->num_rings; i++) {
  10706. pring = &psli->sli3_ring[i];
  10707. prev_pring_flag = pring->flag;
  10708. /* Only slow rings */
  10709. if (pring->ringno == LPFC_ELS_RING) {
  10710. pring->flag |= LPFC_DEFERRED_RING_EVENT;
  10711. /* Set the lpfc data pending flag */
  10712. set_bit(LPFC_DATA_READY, &phba->data_flags);
  10713. }
  10714. list_for_each_entry_safe(iocb, next_iocb,
  10715. &pring->txq, list) {
  10716. if (iocb->vport != vport)
  10717. continue;
  10718. list_move_tail(&iocb->list, &completions);
  10719. }
  10720. list_for_each_entry_safe(iocb, next_iocb,
  10721. &pring->txcmplq, list) {
  10722. if (iocb->vport != vport)
  10723. continue;
  10724. lpfc_sli_issue_abort_iotag(phba, pring, iocb,
  10725. NULL);
  10726. }
  10727. pring->flag = prev_pring_flag;
  10728. }
  10729. } else {
  10730. list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
  10731. pring = qp->pring;
  10732. if (!pring)
  10733. continue;
  10734. if (pring == phba->sli4_hba.els_wq->pring) {
  10735. pring->flag |= LPFC_DEFERRED_RING_EVENT;
  10736. /* Set the lpfc data pending flag */
  10737. set_bit(LPFC_DATA_READY, &phba->data_flags);
  10738. }
  10739. prev_pring_flag = pring->flag;
  10740. spin_lock(&pring->ring_lock);
  10741. list_for_each_entry_safe(iocb, next_iocb,
  10742. &pring->txq, list) {
  10743. if (iocb->vport != vport)
  10744. continue;
  10745. list_move_tail(&iocb->list, &completions);
  10746. }
  10747. spin_unlock(&pring->ring_lock);
  10748. list_for_each_entry_safe(iocb, next_iocb,
  10749. &pring->txcmplq, list) {
  10750. if (iocb->vport != vport)
  10751. continue;
  10752. lpfc_sli_issue_abort_iotag(phba, pring, iocb,
  10753. NULL);
  10754. }
  10755. pring->flag = prev_pring_flag;
  10756. }
  10757. }
  10758. spin_unlock_irqrestore(&phba->hbalock, flags);
  10759. /* Make sure HBA is alive */
  10760. lpfc_issue_hb_tmo(phba);
  10761. /* Cancel all the IOCBs from the completions list */
  10762. lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
  10763. IOERR_SLI_DOWN);
  10764. return 1;
  10765. }
  10766. /**
  10767. * lpfc_sli_hba_down - Resource cleanup function for the HBA
  10768. * @phba: Pointer to HBA context object.
  10769. *
  10770. * This function cleans up all iocb, buffers, mailbox commands
  10771. * while shutting down the HBA. This function is called with no
  10772. * lock held and always returns 1.
  10773. * This function does the following to cleanup driver resources:
  10774. * - Free discovery resources for each virtual port
  10775. * - Cleanup any pending fabric iocbs
  10776. * - Iterate through the iocb txq and free each entry
  10777. * in the list.
  10778. * - Free up any buffer posted to the HBA
  10779. * - Free mailbox commands in the mailbox queue.
  10780. **/
  10781. int
  10782. lpfc_sli_hba_down(struct lpfc_hba *phba)
  10783. {
  10784. LIST_HEAD(completions);
  10785. struct lpfc_sli *psli = &phba->sli;
  10786. struct lpfc_queue *qp = NULL;
  10787. struct lpfc_sli_ring *pring;
  10788. struct lpfc_dmabuf *buf_ptr;
  10789. unsigned long flags = 0;
  10790. int i;
  10791. /* Shutdown the mailbox command sub-system */
  10792. lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
  10793. lpfc_hba_down_prep(phba);
  10794. /* Disable softirqs, including timers from obtaining phba->hbalock */
  10795. local_bh_disable();
  10796. lpfc_fabric_abort_hba(phba);
  10797. spin_lock_irqsave(&phba->hbalock, flags);
  10798. /*
  10799. * Error everything on the txq since these iocbs
  10800. * have not been given to the FW yet.
  10801. */
  10802. if (phba->sli_rev != LPFC_SLI_REV4) {
  10803. for (i = 0; i < psli->num_rings; i++) {
  10804. pring = &psli->sli3_ring[i];
  10805. /* Only slow rings */
  10806. if (pring->ringno == LPFC_ELS_RING) {
  10807. pring->flag |= LPFC_DEFERRED_RING_EVENT;
  10808. /* Set the lpfc data pending flag */
  10809. set_bit(LPFC_DATA_READY, &phba->data_flags);
  10810. }
  10811. list_splice_init(&pring->txq, &completions);
  10812. }
  10813. } else {
  10814. list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
  10815. pring = qp->pring;
  10816. if (!pring)
  10817. continue;
  10818. spin_lock(&pring->ring_lock);
  10819. list_splice_init(&pring->txq, &completions);
  10820. spin_unlock(&pring->ring_lock);
  10821. if (pring == phba->sli4_hba.els_wq->pring) {
  10822. pring->flag |= LPFC_DEFERRED_RING_EVENT;
  10823. /* Set the lpfc data pending flag */
  10824. set_bit(LPFC_DATA_READY, &phba->data_flags);
  10825. }
  10826. }
  10827. }
  10828. spin_unlock_irqrestore(&phba->hbalock, flags);
  10829. /* Cancel all the IOCBs from the completions list */
  10830. lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
  10831. IOERR_SLI_DOWN);
  10832. spin_lock_irqsave(&phba->hbalock, flags);
  10833. list_splice_init(&phba->elsbuf, &completions);
  10834. phba->elsbuf_cnt = 0;
  10835. phba->elsbuf_prev_cnt = 0;
  10836. spin_unlock_irqrestore(&phba->hbalock, flags);
  10837. while (!list_empty(&completions)) {
  10838. list_remove_head(&completions, buf_ptr,
  10839. struct lpfc_dmabuf, list);
  10840. lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
  10841. kfree(buf_ptr);
  10842. }
  10843. /* Enable softirqs again, done with phba->hbalock */
  10844. local_bh_enable();
  10845. /* Return any active mbox cmds */
  10846. del_timer_sync(&psli->mbox_tmo);
  10847. spin_lock_irqsave(&phba->pport->work_port_lock, flags);
  10848. phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
  10849. spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
  10850. return 1;
  10851. }
  10852. /**
  10853. * lpfc_sli_pcimem_bcopy - SLI memory copy function
  10854. * @srcp: Source memory pointer.
  10855. * @destp: Destination memory pointer.
10856. * @cnt: Number of bytes to copy, processed one 32-bit word at a time.
  10857. *
  10858. * This function is used for copying data between driver memory
  10859. * and the SLI memory. This function also changes the endianness
  10860. * of each word if native endianness is different from SLI
  10861. * endianness. This function can be called with or without
  10862. * lock.
  10863. **/
  10864. void
  10865. lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
  10866. {
  10867. uint32_t *src = srcp;
  10868. uint32_t *dest = destp;
  10869. uint32_t ldata;
  10870. int i;
  10871. for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
  10872. ldata = *src;
  10873. ldata = le32_to_cpu(ldata);
  10874. *dest = ldata;
  10875. src++;
  10876. dest++;
  10877. }
  10878. }
  10879. /**
  10880. * lpfc_sli_bemem_bcopy - SLI memory copy function
  10881. * @srcp: Source memory pointer.
  10882. * @destp: Destination memory pointer.
10883. * @cnt: Number of bytes to copy, processed one 32-bit word at a time.
  10884. *
  10885. * This function is used for copying data between a data structure
  10886. * with big endian representation to local endianness.
  10887. * This function can be called with or without lock.
  10888. **/
  10889. void
  10890. lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
  10891. {
  10892. uint32_t *src = srcp;
  10893. uint32_t *dest = destp;
  10894. uint32_t ldata;
  10895. int i;
  10896. for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
  10897. ldata = *src;
  10898. ldata = be32_to_cpu(ldata);
  10899. *dest = ldata;
  10900. src++;
  10901. dest++;
  10902. }
  10903. }
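/*
 * Illustrative sketch only: both copy helpers above take a length in bytes
 * and walk the buffers one 32-bit word at a time, converting each word from
 * SLI (little-endian) or big-endian representation to host endianness.  The
 * wrapper below is hypothetical and exists just to make the byte-count
 * semantics explicit at a call site.
 */
static void example_copy_be_words(void *hw_src, void *host_dest, size_t bytes)
{
	/* bytes is expected to be a multiple of sizeof(uint32_t) */
	lpfc_sli_bemem_bcopy(hw_src, host_dest, (uint32_t)bytes);
}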
  10904. /**
  10905. * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
  10906. * @phba: Pointer to HBA context object.
  10907. * @pring: Pointer to driver SLI ring object.
  10908. * @mp: Pointer to driver buffer object.
  10909. *
  10910. * This function is called with no lock held.
10911. * It always returns zero after adding the buffer to the postbufq
  10912. * buffer list.
  10913. **/
  10914. int
  10915. lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  10916. struct lpfc_dmabuf *mp)
  10917. {
  10918. /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
  10919. later */
  10920. spin_lock_irq(&phba->hbalock);
  10921. list_add_tail(&mp->list, &pring->postbufq);
  10922. pring->postbufq_cnt++;
  10923. spin_unlock_irq(&phba->hbalock);
  10924. return 0;
  10925. }
  10926. /**
  10927. * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
  10928. * @phba: Pointer to HBA context object.
  10929. *
  10930. * When HBQ is enabled, buffers are searched based on tags. This function
  10931. * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
  10932. * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
  10933. * does not conflict with tags of buffer posted for unsolicited events.
  10934. * The function returns the allocated tag. The function is called with
  10935. * no locks held.
  10936. **/
  10937. uint32_t
  10938. lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
  10939. {
  10940. spin_lock_irq(&phba->hbalock);
  10941. phba->buffer_tag_count++;
  10942. /*
10943. * Always set the QUE_BUFTAG_BIT to distinguish this tag from
10944. * a tag assigned by HBQ.
  10945. */
  10946. phba->buffer_tag_count |= QUE_BUFTAG_BIT;
  10947. spin_unlock_irq(&phba->hbalock);
  10948. return phba->buffer_tag_count;
  10949. }
  10950. /**
  10951. * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
  10952. * @phba: Pointer to HBA context object.
  10953. * @pring: Pointer to driver SLI ring object.
  10954. * @tag: Buffer tag.
  10955. *
  10956. * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
  10957. * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
  10958. * iocb is posted to the response ring with the tag of the buffer.
  10959. * This function searches the pring->postbufq list using the tag
  10960. * to find buffer associated with CMD_IOCB_RET_XRI64_CX
  10961. * iocb. If the buffer is found then lpfc_dmabuf object of the
  10962. * buffer is returned to the caller else NULL is returned.
  10963. * This function is called with no lock held.
  10964. **/
  10965. struct lpfc_dmabuf *
  10966. lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  10967. uint32_t tag)
  10968. {
  10969. struct lpfc_dmabuf *mp, *next_mp;
  10970. struct list_head *slp = &pring->postbufq;
  10971. /* Search postbufq, from the beginning, looking for a match on tag */
  10972. spin_lock_irq(&phba->hbalock);
  10973. list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
  10974. if (mp->buffer_tag == tag) {
  10975. list_del_init(&mp->list);
  10976. pring->postbufq_cnt--;
  10977. spin_unlock_irq(&phba->hbalock);
  10978. return mp;
  10979. }
  10980. }
  10981. spin_unlock_irq(&phba->hbalock);
  10982. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  10983. "0402 Cannot find virtual addr for buffer tag on "
  10984. "ring %d Data x%lx x%px x%px x%x\n",
  10985. pring->ringno, (unsigned long) tag,
  10986. slp->next, slp->prev, pring->postbufq_cnt);
  10987. return NULL;
  10988. }
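/*
 * Illustrative sketch only: the buffer-tag flow pairs the three helpers
 * above.  A caller allocates a tag, posts the buffer on pring->postbufq via
 * lpfc_sli_ringpostbuf_put(), carries the tag in a CMD_QUE_XRI64_CX iocb,
 * and later recovers the buffer from the tag echoed back by the HBA.  The
 * function name below is hypothetical.
 */
static struct lpfc_dmabuf *
example_post_and_reclaim(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	uint32_t tag;

	tag = lpfc_sli_get_buffer_tag(phba);
	mp->buffer_tag = tag;
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* ... the tag would be placed in the CMD_QUE_XRI64_CX iocb here ... */

	/* On the CMD_IOCB_RET_XRI64_CX completion the buffer is looked up */
	return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
}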
  10989. /**
  10990. * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
  10991. * @phba: Pointer to HBA context object.
  10992. * @pring: Pointer to driver SLI ring object.
  10993. * @phys: DMA address of the buffer.
  10994. *
  10995. * This function searches the buffer list using the dma_address
  10996. * of unsolicited event to find the driver's lpfc_dmabuf object
  10997. * corresponding to the dma_address. The function returns the
  10998. * lpfc_dmabuf object if a buffer is found else it returns NULL.
  10999. * This function is called by the ct and els unsolicited event
  11000. * handlers to get the buffer associated with the unsolicited
  11001. * event.
  11002. *
  11003. * This function is called with no lock held.
  11004. **/
  11005. struct lpfc_dmabuf *
  11006. lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  11007. dma_addr_t phys)
  11008. {
  11009. struct lpfc_dmabuf *mp, *next_mp;
  11010. struct list_head *slp = &pring->postbufq;
  11011. /* Search postbufq, from the beginning, looking for a match on phys */
  11012. spin_lock_irq(&phba->hbalock);
  11013. list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
  11014. if (mp->phys == phys) {
  11015. list_del_init(&mp->list);
  11016. pring->postbufq_cnt--;
  11017. spin_unlock_irq(&phba->hbalock);
  11018. return mp;
  11019. }
  11020. }
  11021. spin_unlock_irq(&phba->hbalock);
  11022. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  11023. "0410 Cannot find virtual addr for mapped buf on "
  11024. "ring %d Data x%llx x%px x%px x%x\n",
  11025. pring->ringno, (unsigned long long)phys,
  11026. slp->next, slp->prev, pring->postbufq_cnt);
  11027. return NULL;
  11028. }
  11029. /**
  11030. * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
  11031. * @phba: Pointer to HBA context object.
  11032. * @cmdiocb: Pointer to driver command iocb object.
  11033. * @rspiocb: Pointer to driver response iocb object.
  11034. *
  11035. * This function is the completion handler for the abort iocbs for
  11036. * ELS commands. This function is called from the ELS ring event
  11037. * handler with no lock held. This function frees memory resources
  11038. * associated with the abort iocb.
  11039. **/
  11040. static void
  11041. lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  11042. struct lpfc_iocbq *rspiocb)
  11043. {
  11044. u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
  11045. u32 ulp_word4 = get_job_word4(phba, rspiocb);
  11046. u8 cmnd = get_job_cmnd(phba, cmdiocb);
  11047. if (ulp_status) {
  11048. /*
  11049. * Assume that the port already completed and returned, or
  11050. * will return the iocb. Just Log the message.
  11051. */
  11052. if (phba->sli_rev < LPFC_SLI_REV4) {
  11053. if (cmnd == CMD_ABORT_XRI_CX &&
  11054. ulp_status == IOSTAT_LOCAL_REJECT &&
  11055. ulp_word4 == IOERR_ABORT_REQUESTED) {
  11056. goto release_iocb;
  11057. }
  11058. }
  11059. }
  11060. lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
  11061. "0327 Abort els iocb complete x%px with io cmd xri %x "
  11062. "abort tag x%x abort status %x abort code %x\n",
  11063. cmdiocb, get_job_abtsiotag(phba, cmdiocb),
  11064. (phba->sli_rev == LPFC_SLI_REV4) ?
  11065. get_wqe_reqtag(cmdiocb) :
  11066. cmdiocb->iocb.ulpIoTag,
  11067. ulp_status, ulp_word4);
  11068. release_iocb:
  11069. lpfc_sli_release_iocbq(phba, cmdiocb);
  11070. return;
  11071. }
  11072. /**
  11073. * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
  11074. * @phba: Pointer to HBA context object.
  11075. * @cmdiocb: Pointer to driver command iocb object.
  11076. * @rspiocb: Pointer to driver response iocb object.
  11077. *
  11078. * The function is called from SLI ring event handler with no
  11079. * lock held. This function is the completion handler for ELS commands
  11080. * which are aborted. The function frees memory resources used for
  11081. * the aborted ELS commands.
  11082. **/
  11083. void
  11084. lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  11085. struct lpfc_iocbq *rspiocb)
  11086. {
  11087. struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
  11088. IOCB_t *irsp;
  11089. LPFC_MBOXQ_t *mbox;
  11090. u32 ulp_command, ulp_status, ulp_word4, iotag;
  11091. ulp_command = get_job_cmnd(phba, cmdiocb);
  11092. ulp_status = get_job_ulpstatus(phba, rspiocb);
  11093. ulp_word4 = get_job_word4(phba, rspiocb);
  11094. if (phba->sli_rev == LPFC_SLI_REV4) {
  11095. iotag = get_wqe_reqtag(cmdiocb);
  11096. } else {
  11097. irsp = &rspiocb->iocb;
  11098. iotag = irsp->ulpIoTag;
  11099. /* It is possible a PLOGI_RJT for NPIV ports to get aborted.
  11100. * The MBX_REG_LOGIN64 mbox command is freed back to the
  11101. * mbox_mem_pool here.
  11102. */
  11103. if (cmdiocb->context_un.mbox) {
  11104. mbox = cmdiocb->context_un.mbox;
  11105. lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
  11106. cmdiocb->context_un.mbox = NULL;
  11107. }
  11108. }
  11109. /* ELS cmd tag <ulpIoTag> completes */
  11110. lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  11111. "0139 Ignoring ELS cmd code x%x ref cnt x%x Data: "
  11112. "x%x x%x x%x x%px\n",
  11113. ulp_command, kref_read(&cmdiocb->ndlp->kref),
  11114. ulp_status, ulp_word4, iotag, cmdiocb->ndlp);
  11115. /*
  11116. * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
  11117. * if exchange is busy.
  11118. */
  11119. if (ulp_command == CMD_GEN_REQUEST64_CR)
  11120. lpfc_ct_free_iocb(phba, cmdiocb);
  11121. else
  11122. lpfc_els_free_iocb(phba, cmdiocb);
  11123. lpfc_nlp_put(ndlp);
  11124. }
  11125. /**
  11126. * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
  11127. * @phba: Pointer to HBA context object.
  11128. * @pring: Pointer to driver SLI ring object.
  11129. * @cmdiocb: Pointer to driver command iocb object.
  11130. * @cmpl: completion function.
  11131. *
  11132. * This function issues an abort iocb for the provided command iocb. In case
  11133. * of unloading, the abort iocb will not be issued to commands on the ELS
11134. * ring. Instead, the completion callback of those commands is changed so
11135. * that nothing happens when they finish. This function is called with
11136. * hbalock held and no ring_lock held (SLI4). The function returns
11137. * IOCB_ABORTING when the command iocb is already being aborted or is
11138. * itself an abort request.
  11139. **/
  11140. int
  11141. lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  11142. struct lpfc_iocbq *cmdiocb, void *cmpl)
  11143. {
  11144. struct lpfc_vport *vport = cmdiocb->vport;
  11145. struct lpfc_iocbq *abtsiocbp;
  11146. int retval = IOCB_ERROR;
  11147. unsigned long iflags;
  11148. struct lpfc_nodelist *ndlp = NULL;
  11149. u32 ulp_command = get_job_cmnd(phba, cmdiocb);
  11150. u16 ulp_context, iotag;
  11151. bool ia;
  11152. /*
  11153. * There are certain command types we don't want to abort. And we
  11154. * don't want to abort commands that are already in the process of
  11155. * being aborted.
  11156. */
  11157. if (ulp_command == CMD_ABORT_XRI_WQE ||
  11158. ulp_command == CMD_ABORT_XRI_CN ||
  11159. ulp_command == CMD_CLOSE_XRI_CN ||
  11160. cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
  11161. return IOCB_ABORTING;
  11162. if (!pring) {
  11163. if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
  11164. cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
  11165. else
  11166. cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
  11167. return retval;
  11168. }
  11169. /*
  11170. * If we're unloading, don't abort iocb on the ELS ring, but change
  11171. * the callback so that nothing happens when it finishes.
  11172. */
  11173. if (test_bit(FC_UNLOADING, &vport->load_flag) &&
  11174. pring->ringno == LPFC_ELS_RING) {
  11175. if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
  11176. cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
  11177. else
  11178. cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
  11179. return retval;
  11180. }
  11181. /* issue ABTS for this IOCB based on iotag */
  11182. abtsiocbp = __lpfc_sli_get_iocbq(phba);
  11183. if (abtsiocbp == NULL)
  11184. return IOCB_NORESOURCE;
  11185. /* This signals the response to set the correct status
  11186. * before calling the completion handler
  11187. */
  11188. cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
  11189. if (phba->sli_rev == LPFC_SLI_REV4) {
  11190. ulp_context = cmdiocb->sli4_xritag;
  11191. iotag = abtsiocbp->iotag;
  11192. } else {
  11193. iotag = cmdiocb->iocb.ulpIoTag;
  11194. if (pring->ringno == LPFC_ELS_RING) {
  11195. ndlp = cmdiocb->ndlp;
  11196. ulp_context = ndlp->nlp_rpi;
  11197. } else {
  11198. ulp_context = cmdiocb->iocb.ulpContext;
  11199. }
  11200. }
  11201. /* Just close the exchange under certain conditions. */
  11202. if (test_bit(FC_UNLOADING, &vport->load_flag) ||
  11203. phba->link_state < LPFC_LINK_UP ||
  11204. (phba->sli_rev == LPFC_SLI_REV4 &&
  11205. phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
  11206. (phba->link_flag & LS_EXTERNAL_LOOPBACK))
  11207. ia = true;
  11208. else
  11209. ia = false;
  11210. lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
  11211. cmdiocb->iocb.ulpClass,
  11212. LPFC_WQE_CQ_ID_DEFAULT, ia, false);
  11213. /* ABTS WQE must go to the same WQ as the WQE to be aborted */
  11214. abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
  11215. if (cmdiocb->cmd_flag & LPFC_IO_FCP)
  11216. abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
  11217. if (cmdiocb->cmd_flag & LPFC_IO_FOF)
  11218. abtsiocbp->cmd_flag |= LPFC_IO_FOF;
  11219. if (cmpl)
  11220. abtsiocbp->cmd_cmpl = cmpl;
  11221. else
  11222. abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
  11223. abtsiocbp->vport = vport;
  11224. if (phba->sli_rev == LPFC_SLI_REV4) {
  11225. pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
  11226. if (unlikely(pring == NULL))
  11227. goto abort_iotag_exit;
  11228. /* Note: both hbalock and ring_lock need to be set here */
  11229. spin_lock_irqsave(&pring->ring_lock, iflags);
  11230. retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
  11231. abtsiocbp, 0);
  11232. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  11233. } else {
  11234. retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
  11235. abtsiocbp, 0);
  11236. }
  11237. abort_iotag_exit:
  11238. lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
  11239. "0339 Abort IO XRI x%x, Original iotag x%x, "
  11240. "abort tag x%x Cmdjob : x%px Abortjob : x%px "
  11241. "retval x%x : IA %d cmd_cmpl %ps\n",
  11242. ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
  11243. cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
  11244. retval, ia, abtsiocbp->cmd_cmpl);
  11245. if (retval) {
  11246. cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  11247. __lpfc_sli_release_iocbq(phba, abtsiocbp);
  11248. }
  11249. /*
  11250. * Caller to this routine should check for IOCB_ERROR
  11251. * and handle it properly. This routine no longer removes
  11252. * iocb off txcmplq and call compl in case of IOCB_ERROR.
  11253. */
  11254. return retval;
  11255. }
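/*
 * Illustrative sketch only: a caller of lpfc_sli_issue_abort_iotag() is
 * expected to hold hbalock, may pass NULL to get the default
 * lpfc_sli_abort_els_cmpl completion, and must treat anything other than
 * IOCB_SUCCESS as "ABTS not issued".  The helper name below is hypothetical.
 */
static int example_try_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			     struct lpfc_iocbq *cmdiocb)
{
	int rc;

	/* hbalock is assumed to be held by the caller here */
	rc = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);

	if (rc == IOCB_ABORTING)
		return 0;	/* abort already in progress; nothing more to do */
	if (rc != IOCB_SUCCESS)
		return -EIO;	/* ABTS was not issued; caller must clean up */
	return 0;
}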
  11256. /**
  11257. * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
  11258. * @phba: pointer to lpfc HBA data structure.
  11259. *
  11260. * This routine will abort all pending and outstanding iocbs to an HBA.
  11261. **/
  11262. void
  11263. lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
  11264. {
  11265. struct lpfc_sli *psli = &phba->sli;
  11266. struct lpfc_sli_ring *pring;
  11267. struct lpfc_queue *qp = NULL;
  11268. int i;
  11269. if (phba->sli_rev != LPFC_SLI_REV4) {
  11270. for (i = 0; i < psli->num_rings; i++) {
  11271. pring = &psli->sli3_ring[i];
  11272. lpfc_sli_abort_iocb_ring(phba, pring);
  11273. }
  11274. return;
  11275. }
  11276. list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
  11277. pring = qp->pring;
  11278. if (!pring)
  11279. continue;
  11280. lpfc_sli_abort_iocb_ring(phba, pring);
  11281. }
  11282. }
  11283. /**
  11284. * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
  11285. * @iocbq: Pointer to iocb object.
  11286. * @vport: Pointer to driver virtual port object.
  11287. *
  11288. * This function acts as an iocb filter for functions which abort FCP iocbs.
  11289. *
  11290. * Return values
  11291. * -ENODEV, if a null iocb or vport ptr is encountered
11292. * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
11293. * already marked as being aborted by the driver, or is an abort iocb itself
  11294. * 0, passes criteria for aborting the FCP I/O iocb
  11295. **/
  11296. static int
  11297. lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
  11298. struct lpfc_vport *vport)
  11299. {
  11300. u8 ulp_command;
  11301. /* No null ptr vports */
  11302. if (!iocbq || iocbq->vport != vport)
  11303. return -ENODEV;
  11304. /* iocb must be for FCP IO, already exists on the TX cmpl queue,
  11305. * can't be premarked as driver aborted, nor be an ABORT iocb itself
  11306. */
  11307. ulp_command = get_job_cmnd(vport->phba, iocbq);
  11308. if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
  11309. !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
  11310. (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
  11311. (ulp_command == CMD_ABORT_XRI_CN ||
  11312. ulp_command == CMD_CLOSE_XRI_CN ||
  11313. ulp_command == CMD_ABORT_XRI_WQE))
  11314. return -EINVAL;
  11315. return 0;
  11316. }
  11317. /**
  11318. * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
  11319. * @iocbq: Pointer to driver iocb object.
  11320. * @vport: Pointer to driver virtual port object.
  11321. * @tgt_id: SCSI ID of the target.
  11322. * @lun_id: LUN ID of the scsi device.
  11323. * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
  11324. *
  11325. * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
  11326. * host.
  11327. *
  11328. * It will return
  11329. * 0 if the filtering criteria is met for the given iocb and will return
  11330. * 1 if the filtering criteria is not met.
  11331. * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
  11332. * given iocb is for the SCSI device specified by vport, tgt_id and
  11333. * lun_id parameter.
  11334. * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
  11335. * given iocb is for the SCSI target specified by vport and tgt_id
  11336. * parameters.
  11337. * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
  11338. * given iocb is for the SCSI host associated with the given vport.
  11339. * This function is called with no locks held.
  11340. **/
  11341. static int
  11342. lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
  11343. uint16_t tgt_id, uint64_t lun_id,
  11344. lpfc_ctx_cmd ctx_cmd)
  11345. {
  11346. struct lpfc_io_buf *lpfc_cmd;
  11347. int rc = 1;
  11348. lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
  11349. if (lpfc_cmd->pCmd == NULL)
  11350. return rc;
  11351. switch (ctx_cmd) {
  11352. case LPFC_CTX_LUN:
  11353. if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
  11354. (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
  11355. (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
  11356. rc = 0;
  11357. break;
  11358. case LPFC_CTX_TGT:
  11359. if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
  11360. (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
  11361. rc = 0;
  11362. break;
  11363. case LPFC_CTX_HOST:
  11364. rc = 0;
  11365. break;
  11366. default:
  11367. printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
  11368. __func__, ctx_cmd);
  11369. break;
  11370. }
  11371. return rc;
  11372. }
  11373. /**
  11374. * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
  11375. * @vport: Pointer to virtual port.
  11376. * @tgt_id: SCSI ID of the target.
  11377. * @lun_id: LUN ID of the scsi device.
  11378. * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
  11379. *
  11380. * This function returns number of FCP commands pending for the vport.
  11381. * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
  11382. * commands pending on the vport associated with SCSI device specified
  11383. * by tgt_id and lun_id parameters.
  11384. * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
  11385. * commands pending on the vport associated with SCSI target specified
  11386. * by tgt_id parameter.
  11387. * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
  11388. * commands pending on the vport.
  11389. * This function returns the number of iocbs which satisfy the filter.
  11390. * This function is called without any lock held.
  11391. **/
  11392. int
  11393. lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
  11394. lpfc_ctx_cmd ctx_cmd)
  11395. {
  11396. struct lpfc_hba *phba = vport->phba;
  11397. struct lpfc_iocbq *iocbq;
  11398. int sum, i;
  11399. unsigned long iflags;
  11400. u8 ulp_command;
  11401. spin_lock_irqsave(&phba->hbalock, iflags);
  11402. for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
  11403. iocbq = phba->sli.iocbq_lookup[i];
  11404. if (!iocbq || iocbq->vport != vport)
  11405. continue;
  11406. if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
  11407. !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
  11408. continue;
  11409. /* Include counting outstanding aborts */
  11410. ulp_command = get_job_cmnd(phba, iocbq);
  11411. if (ulp_command == CMD_ABORT_XRI_CN ||
  11412. ulp_command == CMD_CLOSE_XRI_CN ||
  11413. ulp_command == CMD_ABORT_XRI_WQE) {
  11414. sum++;
  11415. continue;
  11416. }
  11417. if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
  11418. ctx_cmd) == 0)
  11419. sum++;
  11420. }
  11421. spin_unlock_irqrestore(&phba->hbalock, iflags);
  11422. return sum;
  11423. }
  11424. /**
  11425. * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
  11426. * @phba: Pointer to HBA context object
  11427. * @cmdiocb: Pointer to command iocb object.
  11428. * @rspiocb: Pointer to response iocb object.
  11429. *
  11430. * This function is called when an aborted FCP iocb completes. This
  11431. * function is called by the ring event handler with no lock held.
  11432. * This function frees the iocb.
  11433. **/
  11434. void
  11435. lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  11436. struct lpfc_iocbq *rspiocb)
  11437. {
  11438. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  11439. "3096 ABORT_XRI_CX completing on rpi x%x "
  11440. "original iotag x%x, abort cmd iotag x%x "
  11441. "status 0x%x, reason 0x%x\n",
  11442. (phba->sli_rev == LPFC_SLI_REV4) ?
  11443. cmdiocb->sli4_xritag :
  11444. cmdiocb->iocb.un.acxri.abortContextTag,
  11445. get_job_abtsiotag(phba, cmdiocb),
  11446. cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
  11447. get_job_word4(phba, rspiocb));
  11448. lpfc_sli_release_iocbq(phba, cmdiocb);
  11449. return;
  11450. }
  11451. /**
  11452. * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
  11453. * @vport: Pointer to virtual port.
  11454. * @tgt_id: SCSI ID of the target.
  11455. * @lun_id: LUN ID of the scsi device.
  11456. * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
  11457. *
  11458. * This function sends an abort command for every SCSI command
  11459. * associated with the given virtual port pending on the ring
  11460. * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
  11461. * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
  11462. * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
  11463. * followed by lpfc_sli_validate_fcp_iocb.
  11464. *
  11465. * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
  11466. * FCP iocbs associated with lun specified by tgt_id and lun_id
  11467. * parameters
  11468. * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
  11469. * FCP iocbs associated with SCSI target specified by tgt_id parameter.
  11470. * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
  11471. * FCP iocbs associated with virtual port.
  11472. * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
  11473. * lpfc_sli4_calc_ring is used.
11474. * This function returns the number of iocbs it failed to abort.
  11475. * This function is called with no locks held.
  11476. **/
  11477. int
  11478. lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
  11479. lpfc_ctx_cmd abort_cmd)
  11480. {
  11481. struct lpfc_hba *phba = vport->phba;
  11482. struct lpfc_sli_ring *pring = NULL;
  11483. struct lpfc_iocbq *iocbq;
  11484. int errcnt = 0, ret_val = 0;
  11485. unsigned long iflags;
  11486. int i;
  11487. /* all I/Os are in process of being flushed */
  11488. if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
  11489. return errcnt;
  11490. for (i = 1; i <= phba->sli.last_iotag; i++) {
  11491. iocbq = phba->sli.iocbq_lookup[i];
  11492. if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
  11493. continue;
  11494. if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
  11495. abort_cmd) != 0)
  11496. continue;
  11497. spin_lock_irqsave(&phba->hbalock, iflags);
  11498. if (phba->sli_rev == LPFC_SLI_REV3) {
  11499. pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
  11500. } else if (phba->sli_rev == LPFC_SLI_REV4) {
  11501. pring = lpfc_sli4_calc_ring(phba, iocbq);
  11502. }
  11503. ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
  11504. lpfc_sli_abort_fcp_cmpl);
  11505. spin_unlock_irqrestore(&phba->hbalock, iflags);
  11506. if (ret_val != IOCB_SUCCESS)
  11507. errcnt++;
  11508. }
  11509. return errcnt;
  11510. }
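/*
 * Illustrative sketch only: lpfc_sli_abort_iocb() and lpfc_sli_sum_iocb()
 * are naturally used together; the first issues ABTS for everything that
 * matches the target, the second lets the caller poll until the aborted
 * I/Os have actually completed.  The loop bound and msleep() interval here
 * are assumptions for the example, not driver policy.
 */
static int example_abort_target(struct lpfc_vport *vport, uint16_t tgt_id)
{
	int failed, pending, waited = 0;

	failed = lpfc_sli_abort_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
	if (failed)
		return -EIO;

	/* Wait (bounded) for the outstanding FCP I/Os to drain */
	while ((pending = lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT)) &&
	       waited++ < 100)
		msleep(100);

	return pending ? -ETIMEDOUT : 0;
}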
  11511. /**
  11512. * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
  11513. * @vport: Pointer to virtual port.
  11514. * @pring: Pointer to driver SLI ring object.
  11515. * @tgt_id: SCSI ID of the target.
  11516. * @lun_id: LUN ID of the scsi device.
  11517. * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
  11518. *
  11519. * This function sends an abort command for every SCSI command
  11520. * associated with the given virtual port pending on the ring
  11521. * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
  11522. * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
  11523. * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
  11524. * followed by lpfc_sli_validate_fcp_iocb.
  11525. *
  11526. * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
  11527. * FCP iocbs associated with lun specified by tgt_id and lun_id
  11528. * parameters
  11529. * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
  11530. * FCP iocbs associated with SCSI target specified by tgt_id parameter.
  11531. * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
  11532. * FCP iocbs associated with virtual port.
11533. * This function returns the number of iocbs it aborted.
  11534. * This function is called with no locks held right after a taskmgmt
  11535. * command is sent.
  11536. **/
  11537. int
  11538. lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
  11539. uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
  11540. {
  11541. struct lpfc_hba *phba = vport->phba;
  11542. struct lpfc_io_buf *lpfc_cmd;
  11543. struct lpfc_iocbq *abtsiocbq;
  11544. struct lpfc_nodelist *ndlp = NULL;
  11545. struct lpfc_iocbq *iocbq;
  11546. int sum, i, ret_val;
  11547. unsigned long iflags;
  11548. struct lpfc_sli_ring *pring_s4 = NULL;
  11549. u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
  11550. bool ia;
  11551. /* all I/Os are in process of being flushed */
  11552. if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
  11553. return 0;
  11554. sum = 0;
  11555. spin_lock_irqsave(&phba->hbalock, iflags);
  11556. for (i = 1; i <= phba->sli.last_iotag; i++) {
  11557. iocbq = phba->sli.iocbq_lookup[i];
  11558. if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
  11559. continue;
  11560. if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
  11561. cmd) != 0)
  11562. continue;
  11563. /* Guard against IO completion being called at same time */
  11564. lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
  11565. spin_lock(&lpfc_cmd->buf_lock);
  11566. if (!lpfc_cmd->pCmd) {
  11567. spin_unlock(&lpfc_cmd->buf_lock);
  11568. continue;
  11569. }
  11570. if (phba->sli_rev == LPFC_SLI_REV4) {
  11571. pring_s4 =
  11572. phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
  11573. if (!pring_s4) {
  11574. spin_unlock(&lpfc_cmd->buf_lock);
  11575. continue;
  11576. }
  11577. /* Note: both hbalock and ring_lock must be set here */
  11578. spin_lock(&pring_s4->ring_lock);
  11579. }
  11580. /*
  11581. * If the iocbq is already being aborted, don't take a second
  11582. * action, but do count it.
  11583. */
  11584. if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
  11585. !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
  11586. if (phba->sli_rev == LPFC_SLI_REV4)
  11587. spin_unlock(&pring_s4->ring_lock);
  11588. spin_unlock(&lpfc_cmd->buf_lock);
  11589. continue;
  11590. }
  11591. /* issue ABTS for this IOCB based on iotag */
  11592. abtsiocbq = __lpfc_sli_get_iocbq(phba);
  11593. if (!abtsiocbq) {
  11594. if (phba->sli_rev == LPFC_SLI_REV4)
  11595. spin_unlock(&pring_s4->ring_lock);
  11596. spin_unlock(&lpfc_cmd->buf_lock);
  11597. continue;
  11598. }
  11599. if (phba->sli_rev == LPFC_SLI_REV4) {
  11600. iotag = abtsiocbq->iotag;
  11601. ulp_context = iocbq->sli4_xritag;
  11602. cqid = lpfc_cmd->hdwq->io_cq_map;
  11603. } else {
  11604. iotag = iocbq->iocb.ulpIoTag;
  11605. if (pring->ringno == LPFC_ELS_RING) {
  11606. ndlp = iocbq->ndlp;
  11607. ulp_context = ndlp->nlp_rpi;
  11608. } else {
  11609. ulp_context = iocbq->iocb.ulpContext;
  11610. }
  11611. }
  11612. ndlp = lpfc_cmd->rdata->pnode;
  11613. if (lpfc_is_link_up(phba) &&
  11614. (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
  11615. !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
  11616. ia = false;
  11617. else
  11618. ia = true;
  11619. lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
  11620. iocbq->iocb.ulpClass, cqid,
  11621. ia, false);
  11622. abtsiocbq->vport = vport;
  11623. /* ABTS WQE must go to the same WQ as the WQE to be aborted */
  11624. abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
  11625. if (iocbq->cmd_flag & LPFC_IO_FCP)
  11626. abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
  11627. if (iocbq->cmd_flag & LPFC_IO_FOF)
  11628. abtsiocbq->cmd_flag |= LPFC_IO_FOF;
  11629. /* Setup callback routine and issue the command. */
  11630. abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
  11631. /*
  11632. * Indicate the IO is being aborted by the driver and set
  11633. * the caller's flag into the aborted IO.
  11634. */
  11635. iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
  11636. if (phba->sli_rev == LPFC_SLI_REV4) {
  11637. ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
  11638. abtsiocbq, 0);
  11639. spin_unlock(&pring_s4->ring_lock);
  11640. } else {
  11641. ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
  11642. abtsiocbq, 0);
  11643. }
  11644. spin_unlock(&lpfc_cmd->buf_lock);
  11645. if (ret_val == IOCB_ERROR)
  11646. __lpfc_sli_release_iocbq(phba, abtsiocbq);
  11647. else
  11648. sum++;
  11649. }
  11650. spin_unlock_irqrestore(&phba->hbalock, iflags);
  11651. return sum;
  11652. }
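/*
 * Illustrative sketch only: note that the two abort routines report opposite
 * things - lpfc_sli_abort_iocb() returns how many I/Os it FAILED to abort,
 * while lpfc_sli_abort_taskmgmt() returns how many aborts it issued.  The
 * helper below is hypothetical and only shows how a caller reads the return
 * value after a task management command; the log message is not a real
 * driver message.
 */
static void example_abort_after_tmf(struct lpfc_vport *vport,
				    struct lpfc_sli_ring *pring, uint16_t tgt_id)
{
	int issued;

	issued = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, 0, LPFC_CTX_TGT);

	/* Zero here means nothing matched or nothing new needed aborting */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "issued %d aborts after task management command\n",
			 issued);
}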
  11653. /**
  11654. * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
  11655. * @phba: Pointer to HBA context object.
  11656. * @cmdiocbq: Pointer to command iocb.
  11657. * @rspiocbq: Pointer to response iocb.
  11658. *
  11659. * This function is the completion handler for iocbs issued using
  11660. * lpfc_sli_issue_iocb_wait function. This function is called by the
  11661. * ring event handler function without any lock held. This function
  11662. * can be called from both worker thread context and interrupt
  11663. * context. This function also can be called from other thread which
  11664. * cleans up the SLI layer objects.
11665. * This function copies the contents of the response iocb to the
11666. * response iocb memory object provided by the caller of
11667. * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11668. * sleeps waiting for the iocb completion.
  11669. **/
  11670. static void
  11671. lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
  11672. struct lpfc_iocbq *cmdiocbq,
  11673. struct lpfc_iocbq *rspiocbq)
  11674. {
  11675. wait_queue_head_t *pdone_q;
  11676. unsigned long iflags;
  11677. struct lpfc_io_buf *lpfc_cmd;
  11678. size_t offset = offsetof(struct lpfc_iocbq, wqe);
  11679. spin_lock_irqsave(&phba->hbalock, iflags);
  11680. if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
  11681. /*
  11682. * A time out has occurred for the iocb. If a time out
  11683. * completion handler has been supplied, call it. Otherwise,
  11684. * just free the iocbq.
  11685. */
  11686. spin_unlock_irqrestore(&phba->hbalock, iflags);
  11687. cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
  11688. cmdiocbq->wait_cmd_cmpl = NULL;
  11689. if (cmdiocbq->cmd_cmpl)
  11690. cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
  11691. else
  11692. lpfc_sli_release_iocbq(phba, cmdiocbq);
  11693. return;
  11694. }
  11695. /* Copy the contents of the local rspiocb into the caller's buffer. */
  11696. cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
  11697. if (cmdiocbq->rsp_iocb && rspiocbq)
  11698. memcpy((char *)cmdiocbq->rsp_iocb + offset,
  11699. (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
  11700. /* Set the exchange busy flag for task management commands */
  11701. if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
  11702. !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
  11703. lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
  11704. cur_iocbq);
  11705. if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
  11706. lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
  11707. else
  11708. lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
  11709. }
  11710. pdone_q = cmdiocbq->context_un.wait_queue;
  11711. if (pdone_q)
  11712. wake_up(pdone_q);
  11713. spin_unlock_irqrestore(&phba->hbalock, iflags);
  11714. return;
  11715. }
  11716. /**
  11717. * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11718. * @phba: Pointer to HBA context object.
  11719. * @piocbq: Pointer to command iocb.
  11720. * @flag: Flag to test.
  11721. *
  11722. * This routine grabs the hbalock and then test the cmd_flag to
  11723. * see if the passed in flag is set.
  11724. * Returns:
  11725. * 1 if flag is set.
  11726. * 0 if flag is not set.
  11727. **/
  11728. static int
  11729. lpfc_chk_iocb_flg(struct lpfc_hba *phba,
  11730. struct lpfc_iocbq *piocbq, uint32_t flag)
  11731. {
  11732. unsigned long iflags;
  11733. int ret;
  11734. spin_lock_irqsave(&phba->hbalock, iflags);
  11735. ret = piocbq->cmd_flag & flag;
  11736. spin_unlock_irqrestore(&phba->hbalock, iflags);
  11737. return ret;
  11738. }
  11739. /**
  11740. * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11741. * @phba: Pointer to HBA context object.
  11742. * @ring_number: Ring number
  11743. * @piocb: Pointer to command iocb.
  11744. * @prspiocbq: Pointer to response iocb.
  11745. * @timeout: Timeout in number of seconds.
  11746. *
  11747. * This function issues the iocb to firmware and waits for the
11748. * iocb to complete. The cmd_cmpl field of the iocb shall be used
  11749. * to handle iocbs which time out. If the field is NULL, the
  11750. * function shall free the iocbq structure. If more clean up is
  11751. * needed, the caller is expected to provide a completion function
  11752. * that will provide the needed clean up. If the iocb command is
  11753. * not completed within timeout seconds, the function will either
  11754. * free the iocbq structure (if cmd_cmpl == NULL) or execute the
  11755. * completion function set in the cmd_cmpl field and then return
  11756. * a status of IOCB_TIMEDOUT. The caller should not free the iocb
  11757. * resources if this function returns IOCB_TIMEDOUT.
11758. * The function waits for the iocb completion using a
11759. * non-interruptible wait.
  11760. * This function will sleep while waiting for iocb completion.
  11761. * So, this function should not be called from any context which
  11762. * does not allow sleeping. Due to the same reason, this function
  11763. * cannot be called with interrupt disabled.
  11764. * This function assumes that the iocb completions occur while
11765. * this function sleeps. So, this function cannot be called from
11766. * the thread which processes iocb completions for this ring.
  11767. * This function clears the cmd_flag of the iocb object before
  11768. * issuing the iocb and the iocb completion handler sets this
  11769. * flag and wakes this thread when the iocb completes.
  11770. * The contents of the response iocb will be copied to prspiocbq
  11771. * by the completion handler when the command completes.
11772. * This function returns IOCB_SUCCESS on success.
  11773. * This function is called with no lock held.
  11774. **/
  11775. int
  11776. lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
  11777. uint32_t ring_number,
  11778. struct lpfc_iocbq *piocb,
  11779. struct lpfc_iocbq *prspiocbq,
  11780. uint32_t timeout)
  11781. {
  11782. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
  11783. long timeleft, timeout_req = 0;
  11784. int retval = IOCB_SUCCESS;
  11785. uint32_t creg_val;
  11786. struct lpfc_iocbq *iocb;
  11787. int txq_cnt = 0;
  11788. int txcmplq_cnt = 0;
  11789. struct lpfc_sli_ring *pring;
  11790. unsigned long iflags;
  11791. bool iocb_completed = true;
  11792. if (phba->sli_rev >= LPFC_SLI_REV4) {
  11793. lpfc_sli_prep_wqe(phba, piocb);
  11794. pring = lpfc_sli4_calc_ring(phba, piocb);
  11795. } else
  11796. pring = &phba->sli.sli3_ring[ring_number];
  11797. /*
11798. * If the caller has provided a response iocbq buffer, then the iocb's
11799. * rsp_iocb field must already be NULL; otherwise it is an error.
  11800. */
  11801. if (prspiocbq) {
  11802. if (piocb->rsp_iocb)
  11803. return IOCB_ERROR;
  11804. piocb->rsp_iocb = prspiocbq;
  11805. }
  11806. piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
  11807. piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
  11808. piocb->context_un.wait_queue = &done_q;
  11809. piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
  11810. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  11811. if (lpfc_readl(phba->HCregaddr, &creg_val))
  11812. return IOCB_ERROR;
  11813. creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
  11814. writel(creg_val, phba->HCregaddr);
  11815. readl(phba->HCregaddr); /* flush */
  11816. }
  11817. retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
  11818. SLI_IOCB_RET_IOCB);
  11819. if (retval == IOCB_SUCCESS) {
  11820. timeout_req = msecs_to_jiffies(timeout * 1000);
  11821. timeleft = wait_event_timeout(done_q,
  11822. lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
  11823. timeout_req);
  11824. spin_lock_irqsave(&phba->hbalock, iflags);
  11825. if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
  11826. /*
  11827. * IOCB timed out. Inform the wake iocb wait
  11828. * completion function and set local status
  11829. */
  11830. iocb_completed = false;
  11831. piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
  11832. }
  11833. spin_unlock_irqrestore(&phba->hbalock, iflags);
  11834. if (iocb_completed) {
  11835. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  11836. "0331 IOCB wake signaled\n");
  11837. /* Note: we are not indicating if the IOCB has a success
  11838. * status or not - that's for the caller to check.
  11839. * IOCB_SUCCESS means just that the command was sent and
  11840. * completed. Not that it completed successfully.
11841. */
  11842. } else if (timeleft == 0) {
  11843. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  11844. "0338 IOCB wait timeout error - no "
  11845. "wake response Data x%x\n", timeout);
  11846. retval = IOCB_TIMEDOUT;
  11847. } else {
  11848. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  11849. "0330 IOCB wake NOT set, "
  11850. "Data x%x x%lx\n",
  11851. timeout, (timeleft / jiffies));
  11852. retval = IOCB_TIMEDOUT;
  11853. }
  11854. } else if (retval == IOCB_BUSY) {
  11855. if (phba->cfg_log_verbose & LOG_SLI) {
  11856. list_for_each_entry(iocb, &pring->txq, list) {
  11857. txq_cnt++;
  11858. }
  11859. list_for_each_entry(iocb, &pring->txcmplq, list) {
  11860. txcmplq_cnt++;
  11861. }
  11862. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  11863. "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
  11864. phba->iocb_cnt, txq_cnt, txcmplq_cnt);
  11865. }
  11866. return retval;
  11867. } else {
  11868. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  11869. "0332 IOCB wait issue failed, Data x%x\n",
  11870. retval);
  11871. retval = IOCB_ERROR;
  11872. }
  11873. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  11874. if (lpfc_readl(phba->HCregaddr, &creg_val))
  11875. return IOCB_ERROR;
  11876. creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
  11877. writel(creg_val, phba->HCregaddr);
  11878. readl(phba->HCregaddr); /* flush */
  11879. }
  11880. if (prspiocbq)
  11881. piocb->rsp_iocb = NULL;
  11882. piocb->context_un.wait_queue = NULL;
  11883. piocb->cmd_cmpl = NULL;
  11884. return retval;
  11885. }
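/*
 * Editor's note (hypothetical caller sketch, not code from this driver):
 * one way a caller might use lpfc_sli_issue_iocb_wait(). The function name
 * example_send_els_sync and the 30-second timeout are invented; the lpfc
 * calls and return codes are the ones documented above. The key rule is
 * that on IOCB_TIMEDOUT the command iocb must not be freed here, because
 * the deferred completion handler performs that cleanup later. For brevity
 * the sketch returns immediately on timeout; a real caller would also have
 * to decide how rspiocb is disposed of in that case. On IOCB_SUCCESS the
 * caller still has to inspect the response status separately.
 *
 *	static int example_send_els_sync(struct lpfc_hba *phba,
 *					 struct lpfc_iocbq *elsiocb)
 *	{
 *		struct lpfc_iocbq *rspiocb;
 *		int rc;
 *
 *		rspiocb = lpfc_sli_get_iocbq(phba);
 *		if (!rspiocb)
 *			return -ENOMEM;
 *
 *		rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, elsiocb,
 *					      rspiocb, 30);
 *		if (rc == IOCB_TIMEDOUT)
 *			return -ETIMEDOUT;
 *		rc = (rc == IOCB_SUCCESS) ? 0 : -EIO;
 *		lpfc_sli_release_iocbq(phba, rspiocb);
 *		return rc;
 *	}
 */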
  11886. /**
  11887. * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
  11888. * @phba: Pointer to HBA context object.
  11889. * @pmboxq: Pointer to driver mailbox object.
  11890. * @timeout: Timeout in number of seconds.
  11891. *
  11892. * This function issues the mailbox to firmware and waits for the
  11893. * mailbox command to complete. If the mailbox command is not
  11894. * completed within timeout seconds, it returns MBX_TIMEOUT.
  11895. * The function waits for the mailbox completion using an
  11896. * interruptible wait. If the thread is woken up due to a
  11897. * signal, MBX_TIMEOUT error is returned to the caller. Caller
  11898. * should not free the mailbox resources, if this function returns
  11899. * MBX_TIMEOUT.
  11900. * This function will sleep while waiting for mailbox completion.
  11901. * So, this function should not be called from any context which
  11902. * does not allow sleeping. Due to the same reason, this function
  11903. * cannot be called with interrupt disabled.
  11904. * This function assumes that the mailbox completion occurs while
11905. * this function sleeps. So, this function cannot be called from
  11906. * the worker thread which processes mailbox completion.
  11907. * This function is called in the context of HBA management
  11908. * applications.
  11909. * This function returns MBX_SUCCESS when successful.
  11910. * This function is called with no lock held.
  11911. **/
  11912. int
  11913. lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
  11914. uint32_t timeout)
  11915. {
  11916. struct completion mbox_done;
  11917. int retval;
  11918. unsigned long flag;
  11919. pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
  11920. /* setup wake call as IOCB callback */
  11921. pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
  11922. /* setup ctx_u field to pass wait_queue pointer to wake function */
  11923. init_completion(&mbox_done);
  11924. pmboxq->ctx_u.mbox_wait = &mbox_done;
  11925. /* now issue the command */
  11926. retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
  11927. if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
  11928. wait_for_completion_timeout(&mbox_done,
  11929. msecs_to_jiffies(timeout * 1000));
  11930. spin_lock_irqsave(&phba->hbalock, flag);
  11931. pmboxq->ctx_u.mbox_wait = NULL;
  11932. /*
  11933. * if LPFC_MBX_WAKE flag is set the mailbox is completed
  11934. * else do not free the resources.
  11935. */
  11936. if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
  11937. retval = MBX_SUCCESS;
  11938. } else {
  11939. retval = MBX_TIMEOUT;
  11940. pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  11941. }
  11942. spin_unlock_irqrestore(&phba->hbalock, flag);
  11943. }
  11944. return retval;
  11945. }
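/*
 * Editor's note (hypothetical caller sketch, not code from this driver):
 * a typical synchronous mailbox sequence built on lpfc_sli_issue_mbox_wait().
 * example_read_rev_sync is an invented name; mempool_alloc()/mempool_free(),
 * phba->mbox_mem_pool, lpfc_read_rev() and lpfc_mbox_tmo_val() are existing
 * driver facilities. As documented above, the mailbox must not be freed
 * when MBX_TIMEOUT is returned, because the default completion handler
 * installed on timeout still owns it.
 *
 *	static int example_read_rev_sync(struct lpfc_hba *phba)
 *	{
 *		LPFC_MBOXQ_t *mbox;
 *		int rc;
 *
 *		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *		if (!mbox)
 *			return -ENOMEM;
 *		lpfc_read_rev(phba, mbox);
 *		rc = lpfc_sli_issue_mbox_wait(phba, mbox,
 *					      lpfc_mbox_tmo_val(phba, mbox));
 *		if (rc == MBX_TIMEOUT)
 *			return -ETIMEDOUT;
 *		rc = (rc == MBX_SUCCESS && !mbox->u.mb.mbxStatus) ? 0 : -EIO;
 *		mempool_free(mbox, phba->mbox_mem_pool);
 *		return rc;
 *	}
 */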
  11946. /**
  11947. * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
  11948. * @phba: Pointer to HBA context.
  11949. * @mbx_action: Mailbox shutdown options.
  11950. *
  11951. * This function is called to shutdown the driver's mailbox sub-system.
11952. * It first marks the mailbox sub-system as blocked to prevent
11953. * asynchronous mailbox commands from being issued off the pending mailbox
  11954. * command queue. If the mailbox command sub-system shutdown is due to
  11955. * HBA error conditions such as EEH or ERATT, this routine shall invoke
  11956. * the mailbox sub-system flush routine to forcefully bring down the
11957. * mailbox sub-system. Otherwise, if it is due to a normal condition (such
  11958. * as with offline or HBA function reset), this routine will wait for the
  11959. * outstanding mailbox command to complete before invoking the mailbox
  11960. * sub-system flush routine to gracefully bring down mailbox sub-system.
  11961. **/
  11962. void
  11963. lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
  11964. {
  11965. struct lpfc_sli *psli = &phba->sli;
  11966. unsigned long timeout;
  11967. if (mbx_action == LPFC_MBX_NO_WAIT) {
  11968. /* delay 100ms for port state */
  11969. msleep(100);
  11970. lpfc_sli_mbox_sys_flush(phba);
  11971. return;
  11972. }
  11973. timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
  11974. /* Disable softirqs, including timers from obtaining phba->hbalock */
  11975. local_bh_disable();
  11976. spin_lock_irq(&phba->hbalock);
  11977. psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
  11978. if (psli->sli_flag & LPFC_SLI_ACTIVE) {
  11979. /* Determine how long we might wait for the active mailbox
  11980. * command to be gracefully completed by firmware.
  11981. */
  11982. if (phba->sli.mbox_active)
  11983. timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
  11984. phba->sli.mbox_active) *
  11985. 1000) + jiffies;
  11986. spin_unlock_irq(&phba->hbalock);
  11987. /* Enable softirqs again, done with phba->hbalock */
  11988. local_bh_enable();
  11989. while (phba->sli.mbox_active) {
  11990. /* Check active mailbox complete status every 2ms */
  11991. msleep(2);
  11992. if (time_after(jiffies, timeout))
11993. /* Timeout; let the mailbox flush routine
11994. * forcefully release the active mailbox command
  11995. */
  11996. break;
  11997. }
  11998. } else {
  11999. spin_unlock_irq(&phba->hbalock);
  12000. /* Enable softirqs again, done with phba->hbalock */
  12001. local_bh_enable();
  12002. }
  12003. lpfc_sli_mbox_sys_flush(phba);
  12004. }
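/*
 * Editor's note (illustrative sketch): the shutdown routine above uses the
 * standard jiffies deadline idiom - compute an absolute deadline once with
 * msecs_to_jiffies(), then poll and compare with time_after(). A minimal
 * generic form, with invented names (tmo_ms, condition_met):
 *
 *	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);
 *
 *	while (!condition_met()) {
 *		msleep(2);
 *		if (time_after(jiffies, deadline))
 *			break;
 *	}
 */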
  12005. /**
  12006. * lpfc_sli_eratt_read - read sli-3 error attention events
  12007. * @phba: Pointer to HBA context.
  12008. *
  12009. * This function is called to read the SLI3 device error attention registers
  12010. * for possible error attention events. The caller must hold the hostlock
  12011. * with spin_lock_irq().
  12012. *
  12013. * This function returns 1 when there is Error Attention in the Host Attention
  12014. * Register and returns 0 otherwise.
  12015. **/
  12016. static int
  12017. lpfc_sli_eratt_read(struct lpfc_hba *phba)
  12018. {
  12019. uint32_t ha_copy;
  12020. /* Read chip Host Attention (HA) register */
  12021. if (lpfc_readl(phba->HAregaddr, &ha_copy))
  12022. goto unplug_err;
  12023. if (ha_copy & HA_ERATT) {
  12024. /* Read host status register to retrieve error event */
  12025. if (lpfc_sli_read_hs(phba))
  12026. goto unplug_err;
12027. /* Check if a deferred error condition is active */
  12028. if ((HS_FFER1 & phba->work_hs) &&
  12029. ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
  12030. HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
  12031. set_bit(DEFER_ERATT, &phba->hba_flag);
  12032. /* Clear all interrupt enable conditions */
  12033. writel(0, phba->HCregaddr);
  12034. readl(phba->HCregaddr);
  12035. }
  12036. /* Set the driver HA work bitmap */
  12037. phba->work_ha |= HA_ERATT;
  12038. /* Indicate polling handles this ERATT */
  12039. set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
  12040. return 1;
  12041. }
  12042. return 0;
  12043. unplug_err:
  12044. /* Set the driver HS work bitmap */
  12045. phba->work_hs |= UNPLUG_ERR;
  12046. /* Set the driver HA work bitmap */
  12047. phba->work_ha |= HA_ERATT;
  12048. /* Indicate polling handles this ERATT */
  12049. set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
  12050. return 1;
  12051. }
  12052. /**
  12053. * lpfc_sli4_eratt_read - read sli-4 error attention events
  12054. * @phba: Pointer to HBA context.
  12055. *
  12056. * This function is called to read the SLI4 device error attention registers
  12057. * for possible error attention events. The caller must hold the hostlock
  12058. * with spin_lock_irq().
  12059. *
  12060. * This function returns 1 when there is Error Attention in the Host Attention
  12061. * Register and returns 0 otherwise.
  12062. **/
  12063. static int
  12064. lpfc_sli4_eratt_read(struct lpfc_hba *phba)
  12065. {
  12066. uint32_t uerr_sta_hi, uerr_sta_lo;
  12067. uint32_t if_type, portsmphr;
  12068. struct lpfc_register portstat_reg;
  12069. u32 logmask;
  12070. /*
  12071. * For now, use the SLI4 device internal unrecoverable error
  12072. * registers for error attention. This can be changed later.
  12073. */
  12074. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  12075. switch (if_type) {
  12076. case LPFC_SLI_INTF_IF_TYPE_0:
  12077. if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
  12078. &uerr_sta_lo) ||
  12079. lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
  12080. &uerr_sta_hi)) {
  12081. phba->work_hs |= UNPLUG_ERR;
  12082. phba->work_ha |= HA_ERATT;
  12083. set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
  12084. return 1;
  12085. }
  12086. if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
  12087. (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
  12088. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  12089. "1423 HBA Unrecoverable error: "
  12090. "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
  12091. "ue_mask_lo_reg=0x%x, "
  12092. "ue_mask_hi_reg=0x%x\n",
  12093. uerr_sta_lo, uerr_sta_hi,
  12094. phba->sli4_hba.ue_mask_lo,
  12095. phba->sli4_hba.ue_mask_hi);
  12096. phba->work_status[0] = uerr_sta_lo;
  12097. phba->work_status[1] = uerr_sta_hi;
  12098. phba->work_ha |= HA_ERATT;
  12099. set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
  12100. return 1;
  12101. }
  12102. break;
  12103. case LPFC_SLI_INTF_IF_TYPE_2:
  12104. case LPFC_SLI_INTF_IF_TYPE_6:
  12105. if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
  12106. &portstat_reg.word0) ||
  12107. lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
  12108. &portsmphr)){
  12109. phba->work_hs |= UNPLUG_ERR;
  12110. phba->work_ha |= HA_ERATT;
  12111. set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
  12112. return 1;
  12113. }
  12114. if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
  12115. phba->work_status[0] =
  12116. readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
  12117. phba->work_status[1] =
  12118. readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
  12119. logmask = LOG_TRACE_EVENT;
  12120. if (phba->work_status[0] ==
  12121. SLIPORT_ERR1_REG_ERR_CODE_2 &&
  12122. phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
  12123. logmask = LOG_SLI;
  12124. lpfc_printf_log(phba, KERN_ERR, logmask,
  12125. "2885 Port Status Event: "
  12126. "port status reg 0x%x, "
  12127. "port smphr reg 0x%x, "
  12128. "error 1=0x%x, error 2=0x%x\n",
  12129. portstat_reg.word0,
  12130. portsmphr,
  12131. phba->work_status[0],
  12132. phba->work_status[1]);
  12133. phba->work_ha |= HA_ERATT;
  12134. set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
  12135. return 1;
  12136. }
  12137. break;
  12138. case LPFC_SLI_INTF_IF_TYPE_1:
  12139. default:
  12140. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  12141. "2886 HBA Error Attention on unsupported "
  12142. "if type %d.", if_type);
  12143. return 1;
  12144. }
  12145. return 0;
  12146. }
  12147. /**
  12148. * lpfc_sli_check_eratt - check error attention events
  12149. * @phba: Pointer to HBA context.
  12150. *
  12151. * This function is called from timer soft interrupt context to check HBA's
  12152. * error attention register bit for error attention events.
  12153. *
  12154. * This function returns 1 when there is Error Attention in the Host Attention
  12155. * Register and returns 0 otherwise.
  12156. **/
  12157. int
  12158. lpfc_sli_check_eratt(struct lpfc_hba *phba)
  12159. {
  12160. uint32_t ha_copy;
  12161. /* If somebody is waiting to handle an eratt, don't process it
  12162. * here. The brdkill function will do this.
  12163. */
  12164. if (phba->link_flag & LS_IGNORE_ERATT)
  12165. return 0;
  12166. /* Check if interrupt handler handles this ERATT */
  12167. if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
  12168. /* Interrupt handler has handled ERATT */
  12169. return 0;
  12170. /*
  12171. * If there is deferred error attention, do not check for error
  12172. * attention
  12173. */
  12174. if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
  12175. return 0;
  12176. spin_lock_irq(&phba->hbalock);
  12177. /* If PCI channel is offline, don't process it */
  12178. if (unlikely(pci_channel_offline(phba->pcidev))) {
  12179. spin_unlock_irq(&phba->hbalock);
  12180. return 0;
  12181. }
  12182. switch (phba->sli_rev) {
  12183. case LPFC_SLI_REV2:
  12184. case LPFC_SLI_REV3:
  12185. /* Read chip Host Attention (HA) register */
  12186. ha_copy = lpfc_sli_eratt_read(phba);
  12187. break;
  12188. case LPFC_SLI_REV4:
12189. /* Read device Unrecoverable Error (UERR) registers */
  12190. ha_copy = lpfc_sli4_eratt_read(phba);
  12191. break;
  12192. default:
  12193. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  12194. "0299 Invalid SLI revision (%d)\n",
  12195. phba->sli_rev);
  12196. ha_copy = 0;
  12197. break;
  12198. }
  12199. spin_unlock_irq(&phba->hbalock);
  12200. return ha_copy;
  12201. }
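/*
 * Editor's note (hedged sketch of a typical caller, not verified against
 * the actual poll path): the error-attention poll timer is expected to call
 * lpfc_sli_check_eratt() from timer context and, when it reports an event,
 * hand the work to the worker thread rather than process it inline.
 * Roughly:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */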
  12202. /**
  12203. * lpfc_intr_state_check - Check device state for interrupt handling
  12204. * @phba: Pointer to HBA context.
  12205. *
  12206. * This inline routine checks whether a device or its PCI slot is in a state
12207. * in which the interrupt should be handled.
  12208. *
  12209. * This function returns 0 if the device or the PCI slot is in a state that
  12210. * interrupt should be handled, otherwise -EIO.
  12211. */
  12212. static inline int
  12213. lpfc_intr_state_check(struct lpfc_hba *phba)
  12214. {
  12215. /* If the pci channel is offline, ignore all the interrupts */
  12216. if (unlikely(pci_channel_offline(phba->pcidev)))
  12217. return -EIO;
  12218. /* Update device level interrupt statistics */
  12219. phba->sli.slistat.sli_intr++;
  12220. /* Ignore all interrupts during initialization. */
  12221. if (unlikely(phba->link_state < LPFC_LINK_DOWN))
  12222. return -EIO;
  12223. return 0;
  12224. }
  12225. /**
  12226. * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
  12227. * @irq: Interrupt number.
  12228. * @dev_id: The device context pointer.
  12229. *
  12230. * This function is directly called from the PCI layer as an interrupt
  12231. * service routine when device with SLI-3 interface spec is enabled with
  12232. * MSI-X multi-message interrupt mode and there are slow-path events in
  12233. * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
  12234. * interrupt mode, this function is called as part of the device-level
  12235. * interrupt handler. When the PCI slot is in error recovery or the HBA
  12236. * is undergoing initialization, the interrupt handler will not process
  12237. * the interrupt. The link attention and ELS ring attention events are
  12238. * handled by the worker thread. The interrupt handler signals the worker
  12239. * thread and returns for these events. This function is called without
  12240. * any lock held. It gets the hbalock to access and update SLI data
  12241. * structures.
  12242. *
  12243. * This function returns IRQ_HANDLED when interrupt is handled else it
  12244. * returns IRQ_NONE.
  12245. **/
  12246. irqreturn_t
  12247. lpfc_sli_sp_intr_handler(int irq, void *dev_id)
  12248. {
  12249. struct lpfc_hba *phba;
  12250. uint32_t ha_copy, hc_copy;
  12251. uint32_t work_ha_copy;
  12252. unsigned long status;
  12253. unsigned long iflag;
  12254. uint32_t control;
  12255. MAILBOX_t *mbox, *pmbox;
  12256. struct lpfc_vport *vport;
  12257. struct lpfc_nodelist *ndlp;
  12258. struct lpfc_dmabuf *mp;
  12259. LPFC_MBOXQ_t *pmb;
  12260. int rc;
  12261. /*
  12262. * Get the driver's phba structure from the dev_id and
  12263. * assume the HBA is not interrupting.
  12264. */
  12265. phba = (struct lpfc_hba *)dev_id;
  12266. if (unlikely(!phba))
  12267. return IRQ_NONE;
  12268. /*
12269. * Stuff needs to be attended to when this function is invoked as an
  12270. * individual interrupt handler in MSI-X multi-message interrupt mode
  12271. */
  12272. if (phba->intr_type == MSIX) {
  12273. /* Check device state for handling interrupt */
  12274. if (lpfc_intr_state_check(phba))
  12275. return IRQ_NONE;
  12276. /* Need to read HA REG for slow-path events */
  12277. spin_lock_irqsave(&phba->hbalock, iflag);
  12278. if (lpfc_readl(phba->HAregaddr, &ha_copy))
  12279. goto unplug_error;
  12280. /* If somebody is waiting to handle an eratt don't process it
  12281. * here. The brdkill function will do this.
  12282. */
  12283. if (phba->link_flag & LS_IGNORE_ERATT)
  12284. ha_copy &= ~HA_ERATT;
  12285. /* Check the need for handling ERATT in interrupt handler */
  12286. if (ha_copy & HA_ERATT) {
  12287. if (test_and_set_bit(HBA_ERATT_HANDLED,
  12288. &phba->hba_flag))
  12289. /* ERATT polling has handled ERATT */
  12290. ha_copy &= ~HA_ERATT;
  12291. }
  12292. /*
  12293. * If there is deferred error attention, do not check for any
  12294. * interrupt.
  12295. */
  12296. if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
  12297. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12298. return IRQ_NONE;
  12299. }
  12300. /* Clear up only attention source related to slow-path */
  12301. if (lpfc_readl(phba->HCregaddr, &hc_copy))
  12302. goto unplug_error;
  12303. writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
  12304. HC_LAINT_ENA | HC_ERINT_ENA),
  12305. phba->HCregaddr);
  12306. writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
  12307. phba->HAregaddr);
  12308. writel(hc_copy, phba->HCregaddr);
  12309. readl(phba->HAregaddr); /* flush */
  12310. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12311. } else
  12312. ha_copy = phba->ha_copy;
  12313. work_ha_copy = ha_copy & phba->work_ha_mask;
  12314. if (work_ha_copy) {
  12315. if (work_ha_copy & HA_LATT) {
  12316. if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
  12317. /*
  12318. * Turn off Link Attention interrupts
  12319. * until CLEAR_LA done
  12320. */
  12321. spin_lock_irqsave(&phba->hbalock, iflag);
  12322. phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
  12323. if (lpfc_readl(phba->HCregaddr, &control))
  12324. goto unplug_error;
  12325. control &= ~HC_LAINT_ENA;
  12326. writel(control, phba->HCregaddr);
  12327. readl(phba->HCregaddr); /* flush */
  12328. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12329. }
  12330. else
  12331. work_ha_copy &= ~HA_LATT;
  12332. }
  12333. if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
  12334. /*
  12335. * Turn off Slow Rings interrupts, LPFC_ELS_RING is
  12336. * the only slow ring.
  12337. */
  12338. status = (work_ha_copy &
  12339. (HA_RXMASK << (4*LPFC_ELS_RING)));
  12340. status >>= (4*LPFC_ELS_RING);
  12341. if (status & HA_RXMASK) {
  12342. spin_lock_irqsave(&phba->hbalock, iflag);
  12343. if (lpfc_readl(phba->HCregaddr, &control))
  12344. goto unplug_error;
  12345. lpfc_debugfs_slow_ring_trc(phba,
  12346. "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
  12347. control, status,
  12348. (uint32_t)phba->sli.slistat.sli_intr);
  12349. if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
  12350. lpfc_debugfs_slow_ring_trc(phba,
  12351. "ISR Disable ring:"
  12352. "pwork:x%x hawork:x%x wait:x%x",
  12353. phba->work_ha, work_ha_copy,
  12354. (uint32_t)((unsigned long)
  12355. &phba->work_waitq));
  12356. control &=
  12357. ~(HC_R0INT_ENA << LPFC_ELS_RING);
  12358. writel(control, phba->HCregaddr);
  12359. readl(phba->HCregaddr); /* flush */
  12360. }
  12361. else {
  12362. lpfc_debugfs_slow_ring_trc(phba,
  12363. "ISR slow ring: pwork:"
  12364. "x%x hawork:x%x wait:x%x",
  12365. phba->work_ha, work_ha_copy,
  12366. (uint32_t)((unsigned long)
  12367. &phba->work_waitq));
  12368. }
  12369. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12370. }
  12371. }
  12372. spin_lock_irqsave(&phba->hbalock, iflag);
  12373. if (work_ha_copy & HA_ERATT) {
  12374. if (lpfc_sli_read_hs(phba))
  12375. goto unplug_error;
  12376. /*
12377. * Check if a deferred error condition
  12378. * is active
  12379. */
  12380. if ((HS_FFER1 & phba->work_hs) &&
  12381. ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
  12382. HS_FFER6 | HS_FFER7 | HS_FFER8) &
  12383. phba->work_hs)) {
  12384. set_bit(DEFER_ERATT, &phba->hba_flag);
  12385. /* Clear all interrupt enable conditions */
  12386. writel(0, phba->HCregaddr);
  12387. readl(phba->HCregaddr);
  12388. }
  12389. }
  12390. if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
  12391. pmb = phba->sli.mbox_active;
  12392. pmbox = &pmb->u.mb;
  12393. mbox = phba->mbox;
  12394. vport = pmb->vport;
  12395. /* First check out the status word */
  12396. lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
  12397. if (pmbox->mbxOwner != OWN_HOST) {
  12398. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12399. /*
  12400. * Stray Mailbox Interrupt, mbxCommand <cmd>
  12401. * mbxStatus <status>
  12402. */
  12403. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  12404. "(%d):0304 Stray Mailbox "
  12405. "Interrupt mbxCommand x%x "
  12406. "mbxStatus x%x\n",
  12407. (vport ? vport->vpi : 0),
  12408. pmbox->mbxCommand,
  12409. pmbox->mbxStatus);
  12410. /* clear mailbox attention bit */
  12411. work_ha_copy &= ~HA_MBATT;
  12412. } else {
  12413. phba->sli.mbox_active = NULL;
  12414. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12415. phba->last_completion_time = jiffies;
  12416. del_timer(&phba->sli.mbox_tmo);
  12417. if (pmb->mbox_cmpl) {
  12418. lpfc_sli_pcimem_bcopy(mbox, pmbox,
  12419. MAILBOX_CMD_SIZE);
  12420. if (pmb->out_ext_byte_len &&
  12421. pmb->ext_buf)
  12422. lpfc_sli_pcimem_bcopy(
  12423. phba->mbox_ext,
  12424. pmb->ext_buf,
  12425. pmb->out_ext_byte_len);
  12426. }
  12427. if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
  12428. pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
  12429. lpfc_debugfs_disc_trc(vport,
  12430. LPFC_DISC_TRC_MBOX_VPORT,
  12431. "MBOX dflt rpi: : "
  12432. "status:x%x rpi:x%x",
  12433. (uint32_t)pmbox->mbxStatus,
  12434. pmbox->un.varWords[0], 0);
  12435. if (!pmbox->mbxStatus) {
  12436. mp = pmb->ctx_buf;
  12437. ndlp = pmb->ctx_ndlp;
  12438. /* Reg_LOGIN of dflt RPI was
12439. * successful. Now let's get
  12440. * rid of the RPI using the
  12441. * same mbox buffer.
  12442. */
  12443. lpfc_unreg_login(phba,
  12444. vport->vpi,
  12445. pmbox->un.varWords[0],
  12446. pmb);
  12447. pmb->mbox_cmpl =
  12448. lpfc_mbx_cmpl_dflt_rpi;
  12449. pmb->ctx_buf = mp;
  12450. pmb->ctx_ndlp = ndlp;
  12451. pmb->vport = vport;
  12452. rc = lpfc_sli_issue_mbox(phba,
  12453. pmb,
  12454. MBX_NOWAIT);
  12455. if (rc != MBX_BUSY)
  12456. lpfc_printf_log(phba,
  12457. KERN_ERR,
  12458. LOG_TRACE_EVENT,
  12459. "0350 rc should have"
  12460. "been MBX_BUSY\n");
  12461. if (rc != MBX_NOT_FINISHED)
  12462. goto send_current_mbox;
  12463. }
  12464. }
  12465. spin_lock_irqsave(
  12466. &phba->pport->work_port_lock,
  12467. iflag);
  12468. phba->pport->work_port_events &=
  12469. ~WORKER_MBOX_TMO;
  12470. spin_unlock_irqrestore(
  12471. &phba->pport->work_port_lock,
  12472. iflag);
  12473. /* Do NOT queue MBX_HEARTBEAT to the worker
  12474. * thread for processing.
  12475. */
  12476. if (pmbox->mbxCommand == MBX_HEARTBEAT) {
  12477. /* Process mbox now */
  12478. phba->sli.mbox_active = NULL;
  12479. phba->sli.sli_flag &=
  12480. ~LPFC_SLI_MBOX_ACTIVE;
  12481. if (pmb->mbox_cmpl)
  12482. pmb->mbox_cmpl(phba, pmb);
  12483. } else {
  12484. /* Queue to worker thread to process */
  12485. lpfc_mbox_cmpl_put(phba, pmb);
  12486. }
  12487. }
  12488. } else
  12489. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12490. if ((work_ha_copy & HA_MBATT) &&
  12491. (phba->sli.mbox_active == NULL)) {
  12492. send_current_mbox:
  12493. /* Process next mailbox command if there is one */
  12494. do {
  12495. rc = lpfc_sli_issue_mbox(phba, NULL,
  12496. MBX_NOWAIT);
  12497. } while (rc == MBX_NOT_FINISHED);
  12498. if (rc != MBX_SUCCESS)
  12499. lpfc_printf_log(phba, KERN_ERR,
  12500. LOG_TRACE_EVENT,
  12501. "0349 rc should be "
  12502. "MBX_SUCCESS\n");
  12503. }
  12504. spin_lock_irqsave(&phba->hbalock, iflag);
  12505. phba->work_ha |= work_ha_copy;
  12506. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12507. lpfc_worker_wake_up(phba);
  12508. }
  12509. return IRQ_HANDLED;
  12510. unplug_error:
  12511. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12512. return IRQ_HANDLED;
  12513. } /* lpfc_sli_sp_intr_handler */
  12514. /**
  12515. * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
  12516. * @irq: Interrupt number.
  12517. * @dev_id: The device context pointer.
  12518. *
  12519. * This function is directly called from the PCI layer as an interrupt
  12520. * service routine when device with SLI-3 interface spec is enabled with
  12521. * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
  12522. * ring event in the HBA. However, when the device is enabled with either
  12523. * MSI or Pin-IRQ interrupt mode, this function is called as part of the
  12524. * device-level interrupt handler. When the PCI slot is in error recovery
  12525. * or the HBA is undergoing initialization, the interrupt handler will not
12526. * process the interrupt. The SCSI FCP fast-path ring events are handled in
12527. * the interrupt context. This function is called without any lock held.
  12528. * It gets the hbalock to access and update SLI data structures.
  12529. *
  12530. * This function returns IRQ_HANDLED when interrupt is handled else it
  12531. * returns IRQ_NONE.
  12532. **/
  12533. irqreturn_t
  12534. lpfc_sli_fp_intr_handler(int irq, void *dev_id)
  12535. {
  12536. struct lpfc_hba *phba;
  12537. uint32_t ha_copy;
  12538. unsigned long status;
  12539. unsigned long iflag;
  12540. struct lpfc_sli_ring *pring;
  12541. /* Get the driver's phba structure from the dev_id and
  12542. * assume the HBA is not interrupting.
  12543. */
  12544. phba = (struct lpfc_hba *) dev_id;
  12545. if (unlikely(!phba))
  12546. return IRQ_NONE;
  12547. /*
12548. * Stuff needs to be attended to when this function is invoked as an
  12549. * individual interrupt handler in MSI-X multi-message interrupt mode
  12550. */
  12551. if (phba->intr_type == MSIX) {
  12552. /* Check device state for handling interrupt */
  12553. if (lpfc_intr_state_check(phba))
  12554. return IRQ_NONE;
  12555. /* Need to read HA REG for FCP ring and other ring events */
  12556. if (lpfc_readl(phba->HAregaddr, &ha_copy))
  12557. return IRQ_HANDLED;
  12558. /*
  12559. * If there is deferred error attention, do not check for
  12560. * any interrupt.
  12561. */
  12562. if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
  12563. return IRQ_NONE;
  12564. /* Clear up only attention source related to fast-path */
  12565. spin_lock_irqsave(&phba->hbalock, iflag);
  12566. writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
  12567. phba->HAregaddr);
  12568. readl(phba->HAregaddr); /* flush */
  12569. spin_unlock_irqrestore(&phba->hbalock, iflag);
  12570. } else
  12571. ha_copy = phba->ha_copy;
  12572. /*
  12573. * Process all events on FCP ring. Take the optimized path for FCP IO.
  12574. */
  12575. ha_copy &= ~(phba->work_ha_mask);
  12576. status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
  12577. status >>= (4*LPFC_FCP_RING);
  12578. pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
  12579. if (status & HA_RXMASK)
  12580. lpfc_sli_handle_fast_ring_event(phba, pring, status);
  12581. if (phba->cfg_multi_ring_support == 2) {
  12582. /*
  12583. * Process all events on extra ring. Take the optimized path
  12584. * for extra ring IO.
  12585. */
  12586. status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
  12587. status >>= (4*LPFC_EXTRA_RING);
  12588. if (status & HA_RXMASK) {
  12589. lpfc_sli_handle_fast_ring_event(phba,
  12590. &phba->sli.sli3_ring[LPFC_EXTRA_RING],
  12591. status);
  12592. }
  12593. }
  12594. return IRQ_HANDLED;
  12595. } /* lpfc_sli_fp_intr_handler */
  12596. /**
  12597. * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
  12598. * @irq: Interrupt number.
  12599. * @dev_id: The device context pointer.
  12600. *
  12601. * This function is the HBA device-level interrupt handler to device with
  12602. * SLI-3 interface spec, called from the PCI layer when either MSI or
  12603. * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
  12604. * requires driver attention. This function invokes the slow-path interrupt
  12605. * attention handling function and fast-path interrupt attention handling
  12606. * function in turn to process the relevant HBA attention events. This
  12607. * function is called without any lock held. It gets the hbalock to access
  12608. * and update SLI data structures.
  12609. *
  12610. * This function returns IRQ_HANDLED when interrupt is handled, else it
  12611. * returns IRQ_NONE.
  12612. **/
  12613. irqreturn_t
  12614. lpfc_sli_intr_handler(int irq, void *dev_id)
  12615. {
  12616. struct lpfc_hba *phba;
  12617. irqreturn_t sp_irq_rc, fp_irq_rc;
  12618. unsigned long status1, status2;
  12619. uint32_t hc_copy;
  12620. /*
  12621. * Get the driver's phba structure from the dev_id and
  12622. * assume the HBA is not interrupting.
  12623. */
  12624. phba = (struct lpfc_hba *) dev_id;
  12625. if (unlikely(!phba))
  12626. return IRQ_NONE;
  12627. /* Check device state for handling interrupt */
  12628. if (lpfc_intr_state_check(phba))
  12629. return IRQ_NONE;
  12630. spin_lock(&phba->hbalock);
  12631. if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
  12632. spin_unlock(&phba->hbalock);
  12633. return IRQ_HANDLED;
  12634. }
  12635. if (unlikely(!phba->ha_copy)) {
  12636. spin_unlock(&phba->hbalock);
  12637. return IRQ_NONE;
  12638. } else if (phba->ha_copy & HA_ERATT) {
  12639. if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
  12640. /* ERATT polling has handled ERATT */
  12641. phba->ha_copy &= ~HA_ERATT;
  12642. }
  12643. /*
  12644. * If there is deferred error attention, do not check for any interrupt.
  12645. */
  12646. if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
  12647. spin_unlock(&phba->hbalock);
  12648. return IRQ_NONE;
  12649. }
  12650. /* Clear attention sources except link and error attentions */
  12651. if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
  12652. spin_unlock(&phba->hbalock);
  12653. return IRQ_HANDLED;
  12654. }
  12655. writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
  12656. | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
  12657. phba->HCregaddr);
  12658. writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
  12659. writel(hc_copy, phba->HCregaddr);
  12660. readl(phba->HAregaddr); /* flush */
  12661. spin_unlock(&phba->hbalock);
  12662. /*
  12663. * Invokes slow-path host attention interrupt handling as appropriate.
  12664. */
  12665. /* status of events with mailbox and link attention */
  12666. status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
  12667. /* status of events with ELS ring */
  12668. status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
  12669. status2 >>= (4*LPFC_ELS_RING);
  12670. if (status1 || (status2 & HA_RXMASK))
  12671. sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
  12672. else
  12673. sp_irq_rc = IRQ_NONE;
  12674. /*
  12675. * Invoke fast-path host attention interrupt handling as appropriate.
  12676. */
  12677. /* status of events with FCP ring */
  12678. status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
  12679. status1 >>= (4*LPFC_FCP_RING);
  12680. /* status of events with extra ring */
  12681. if (phba->cfg_multi_ring_support == 2) {
  12682. status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
  12683. status2 >>= (4*LPFC_EXTRA_RING);
  12684. } else
  12685. status2 = 0;
  12686. if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
  12687. fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
  12688. else
  12689. fp_irq_rc = IRQ_NONE;
  12690. /* Return device-level interrupt handling status */
  12691. return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
  12692. } /* lpfc_sli_intr_handler */
  12693. /**
  12694. * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
  12695. * @phba: pointer to lpfc hba data structure.
  12696. *
  12697. * This routine is invoked by the worker thread to process all the pending
  12698. * SLI4 els abort xri events.
  12699. **/
  12700. void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
  12701. {
  12702. struct lpfc_cq_event *cq_event;
  12703. unsigned long iflags;
  12704. /* First, declare the els xri abort event has been handled */
  12705. clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
  12706. /* Now, handle all the els xri abort events */
  12707. spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
  12708. while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
  12709. /* Get the first event from the head of the event queue */
  12710. list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
  12711. cq_event, struct lpfc_cq_event, list);
  12712. spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
  12713. iflags);
  12714. /* Notify aborted XRI for ELS work queue */
  12715. lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
  12716. /* Free the event processed back to the free pool */
  12717. lpfc_sli4_cq_event_release(phba, cq_event);
  12718. spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
  12719. iflags);
  12720. }
  12721. spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
  12722. }
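/*
 * Editor's note (illustrative sketch): the routine above follows the usual
 * "drain a work list" shape - pop one entry with the list lock held, drop
 * the lock while handling it, then retake the lock before testing the list
 * again. Generic form, with invented names (lock, work_list, item,
 * struct item_type, handle_one()); list_remove_head() is the driver's own
 * list helper used above:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	while (!list_empty(&work_list)) {
 *		list_remove_head(&work_list, item, struct item_type, list);
 *		spin_unlock_irqrestore(&lock, flags);
 *		handle_one(item);
 *		spin_lock_irqsave(&lock, flags);
 *	}
 *	spin_unlock_irqrestore(&lock, flags);
 */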
  12723. /**
  12724. * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
  12725. * @phba: Pointer to HBA context object.
  12726. * @irspiocbq: Pointer to work-queue completion queue entry.
  12727. *
12728. * This routine handles an ELS work-queue completion event and constructs
  12729. * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
  12730. * discovery engine to handle.
  12731. *
  12732. * Return: Pointer to the receive IOCBQ, NULL otherwise.
  12733. **/
  12734. static struct lpfc_iocbq *
  12735. lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
  12736. struct lpfc_iocbq *irspiocbq)
  12737. {
  12738. struct lpfc_sli_ring *pring;
  12739. struct lpfc_iocbq *cmdiocbq;
  12740. struct lpfc_wcqe_complete *wcqe;
  12741. unsigned long iflags;
  12742. pring = lpfc_phba_elsring(phba);
  12743. if (unlikely(!pring))
  12744. return NULL;
  12745. wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
  12746. spin_lock_irqsave(&pring->ring_lock, iflags);
  12747. pring->stats.iocb_event++;
  12748. /* Look up the ELS command IOCB and create pseudo response IOCB */
  12749. cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
  12750. bf_get(lpfc_wcqe_c_request_tag, wcqe));
  12751. if (unlikely(!cmdiocbq)) {
  12752. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  12753. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  12754. "0386 ELS complete with no corresponding "
  12755. "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
  12756. wcqe->word0, wcqe->total_data_placed,
  12757. wcqe->parameter, wcqe->word3);
  12758. lpfc_sli_release_iocbq(phba, irspiocbq);
  12759. return NULL;
  12760. }
  12761. memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
  12762. memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
  12763. /* Put the iocb back on the txcmplq */
  12764. lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
  12765. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  12766. if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
  12767. spin_lock_irqsave(&phba->hbalock, iflags);
  12768. irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
  12769. spin_unlock_irqrestore(&phba->hbalock, iflags);
  12770. }
  12771. return irspiocbq;
  12772. }
  12773. inline struct lpfc_cq_event *
  12774. lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
  12775. {
  12776. struct lpfc_cq_event *cq_event;
  12777. /* Allocate a new internal CQ_EVENT entry */
  12778. cq_event = lpfc_sli4_cq_event_alloc(phba);
  12779. if (!cq_event) {
  12780. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  12781. "0602 Failed to alloc CQ_EVENT entry\n");
  12782. return NULL;
  12783. }
  12784. /* Move the CQE into the event */
  12785. memcpy(&cq_event->cqe, entry, size);
  12786. return cq_event;
  12787. }
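/*
 * Editor's note (illustrative sketch): lpfc_cq_event_setup() is the common
 * first step for deferring a CQE to the worker thread - copy the entry into
 * a driver-owned lpfc_cq_event, queue it on the relevant slow-path list
 * under its lock, set the matching hba_flag bit and let the worker drain it
 * later. A condensed version of that pattern; the lock, list and flag names
 * (some_list_lock, some_work_queue, SOME_EVENT) are placeholders that vary
 * per event type:
 *
 *	cq_event = lpfc_cq_event_setup(phba, cqe, sizeof(*cqe));
 *	if (!cq_event)
 *		return false;
 *	spin_lock_irqsave(&some_list_lock, iflags);
 *	list_add_tail(&cq_event->list, &some_work_queue);
 *	spin_unlock_irqrestore(&some_list_lock, iflags);
 *	set_bit(SOME_EVENT, &phba->hba_flag);
 *	return true;
 */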
  12788. /**
  12789. * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
  12790. * @phba: Pointer to HBA context object.
  12791. * @mcqe: Pointer to mailbox completion queue entry.
  12792. *
12793. * This routine processes a mailbox completion queue entry with an asynchronous
  12794. * event.
  12795. *
  12796. * Return: true if work posted to worker thread, otherwise false.
  12797. **/
  12798. static bool
  12799. lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
  12800. {
  12801. struct lpfc_cq_event *cq_event;
  12802. unsigned long iflags;
  12803. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  12804. "0392 Async Event: word0:x%x, word1:x%x, "
  12805. "word2:x%x, word3:x%x\n", mcqe->word0,
  12806. mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
  12807. cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
  12808. if (!cq_event)
  12809. return false;
  12810. spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
  12811. list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
  12812. spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
  12813. /* Set the async event flag */
  12814. set_bit(ASYNC_EVENT, &phba->hba_flag);
  12815. return true;
  12816. }
  12817. /**
  12818. * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
  12819. * @phba: Pointer to HBA context object.
  12820. * @mcqe: Pointer to mailbox completion queue entry.
  12821. *
12822. * This routine processes a mailbox completion queue entry with a mailbox
  12823. * completion event.
  12824. *
  12825. * Return: true if work posted to worker thread, otherwise false.
  12826. **/
  12827. static bool
  12828. lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
  12829. {
  12830. uint32_t mcqe_status;
  12831. MAILBOX_t *mbox, *pmbox;
  12832. struct lpfc_mqe *mqe;
  12833. struct lpfc_vport *vport;
  12834. struct lpfc_nodelist *ndlp;
  12835. struct lpfc_dmabuf *mp;
  12836. unsigned long iflags;
  12837. LPFC_MBOXQ_t *pmb;
  12838. bool workposted = false;
  12839. int rc;
12840. /* If not a mailbox completion MCQE, bail out via the mailbox-consumed check */
  12841. if (!bf_get(lpfc_trailer_completed, mcqe))
  12842. goto out_no_mqe_complete;
  12843. /* Get the reference to the active mbox command */
  12844. spin_lock_irqsave(&phba->hbalock, iflags);
  12845. pmb = phba->sli.mbox_active;
  12846. if (unlikely(!pmb)) {
  12847. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  12848. "1832 No pending MBOX command to handle\n");
  12849. spin_unlock_irqrestore(&phba->hbalock, iflags);
  12850. goto out_no_mqe_complete;
  12851. }
  12852. spin_unlock_irqrestore(&phba->hbalock, iflags);
  12853. mqe = &pmb->u.mqe;
  12854. pmbox = (MAILBOX_t *)&pmb->u.mqe;
  12855. mbox = phba->mbox;
  12856. vport = pmb->vport;
  12857. /* Reset heartbeat timer */
  12858. phba->last_completion_time = jiffies;
  12859. del_timer(&phba->sli.mbox_tmo);
  12860. /* Move mbox data to caller's mailbox region, do endian swapping */
  12861. if (pmb->mbox_cmpl && mbox)
  12862. lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
  12863. /*
  12864. * For mcqe errors, conditionally move a modified error code to
  12865. * the mbox so that the error will not be missed.
  12866. */
  12867. mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
  12868. if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
  12869. if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
  12870. bf_set(lpfc_mqe_status, mqe,
  12871. (LPFC_MBX_ERROR_RANGE | mcqe_status));
  12872. }
  12873. if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
  12874. pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
  12875. lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
  12876. "MBOX dflt rpi: status:x%x rpi:x%x",
  12877. mcqe_status,
  12878. pmbox->un.varWords[0], 0);
  12879. if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
  12880. mp = pmb->ctx_buf;
  12881. ndlp = pmb->ctx_ndlp;
  12882. /* Reg_LOGIN of dflt RPI was successful. Mark the
  12883. * node as having an UNREG_LOGIN in progress to stop
  12884. * an unsolicited PLOGI from the same NPortId from
  12885. * starting another mailbox transaction.
  12886. */
  12887. spin_lock_irqsave(&ndlp->lock, iflags);
  12888. ndlp->nlp_flag |= NLP_UNREG_INP;
  12889. spin_unlock_irqrestore(&ndlp->lock, iflags);
  12890. lpfc_unreg_login(phba, vport->vpi,
  12891. pmbox->un.varWords[0], pmb);
  12892. pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
  12893. pmb->ctx_buf = mp;
  12894. /* No reference taken here. This is a default
  12895. * RPI reg/immediate unreg cycle. The reference was
  12896. * taken in the reg rpi path and is released when
  12897. * this mailbox completes.
  12898. */
  12899. pmb->ctx_ndlp = ndlp;
  12900. pmb->vport = vport;
  12901. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  12902. if (rc != MBX_BUSY)
  12903. lpfc_printf_log(phba, KERN_ERR,
  12904. LOG_TRACE_EVENT,
  12905. "0385 rc should "
  12906. "have been MBX_BUSY\n");
  12907. if (rc != MBX_NOT_FINISHED)
  12908. goto send_current_mbox;
  12909. }
  12910. }
  12911. spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
  12912. phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
  12913. spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
  12914. /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
  12915. if (pmbox->mbxCommand == MBX_HEARTBEAT) {
  12916. spin_lock_irqsave(&phba->hbalock, iflags);
  12917. /* Release the mailbox command posting token */
  12918. phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  12919. phba->sli.mbox_active = NULL;
  12920. if (bf_get(lpfc_trailer_consumed, mcqe))
  12921. lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
  12922. spin_unlock_irqrestore(&phba->hbalock, iflags);
  12923. /* Post the next mbox command, if there is one */
  12924. lpfc_sli4_post_async_mbox(phba);
  12925. /* Process cmpl now */
  12926. if (pmb->mbox_cmpl)
  12927. pmb->mbox_cmpl(phba, pmb);
  12928. return false;
  12929. }
  12930. /* There is mailbox completion work to queue to the worker thread */
  12931. spin_lock_irqsave(&phba->hbalock, iflags);
  12932. __lpfc_mbox_cmpl_put(phba, pmb);
  12933. phba->work_ha |= HA_MBATT;
  12934. spin_unlock_irqrestore(&phba->hbalock, iflags);
  12935. workposted = true;
  12936. send_current_mbox:
  12937. spin_lock_irqsave(&phba->hbalock, iflags);
  12938. /* Release the mailbox command posting token */
  12939. phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  12940. /* Setting active mailbox pointer need to be in sync to flag clear */
  12941. phba->sli.mbox_active = NULL;
  12942. if (bf_get(lpfc_trailer_consumed, mcqe))
  12943. lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
  12944. spin_unlock_irqrestore(&phba->hbalock, iflags);
  12945. /* Wake up worker thread to post the next pending mailbox command */
  12946. lpfc_worker_wake_up(phba);
  12947. return workposted;
  12948. out_no_mqe_complete:
  12949. spin_lock_irqsave(&phba->hbalock, iflags);
  12950. if (bf_get(lpfc_trailer_consumed, mcqe))
  12951. lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
  12952. spin_unlock_irqrestore(&phba->hbalock, iflags);
  12953. return false;
  12954. }
  12955. /**
  12956. * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
  12957. * @phba: Pointer to HBA context object.
  12958. * @cq: Pointer to associated CQ
  12959. * @cqe: Pointer to mailbox completion queue entry.
  12960. *
12961. * This routine processes a mailbox completion queue entry and invokes the
  12962. * proper mailbox complete handling or asynchronous event handling routine
  12963. * according to the MCQE's async bit.
  12964. *
  12965. * Return: true if work posted to worker thread, otherwise false.
  12966. **/
  12967. static bool
  12968. lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  12969. struct lpfc_cqe *cqe)
  12970. {
  12971. struct lpfc_mcqe mcqe;
  12972. bool workposted;
  12973. cq->CQ_mbox++;
  12974. /* Copy the mailbox MCQE and convert endian order as needed */
  12975. lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
  12976. /* Invoke the proper event handling routine */
  12977. if (!bf_get(lpfc_trailer_async, &mcqe))
  12978. workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
  12979. else
  12980. workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
  12981. return workposted;
  12982. }
  12983. /**
  12984. * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
  12985. * @phba: Pointer to HBA context object.
  12986. * @cq: Pointer to associated CQ
  12987. * @wcqe: Pointer to work-queue completion queue entry.
  12988. *
  12989. * This routine handles an ELS work-queue completion event.
  12990. *
  12991. * Return: true if work posted to worker thread, otherwise false.
  12992. **/
  12993. static bool
  12994. lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  12995. struct lpfc_wcqe_complete *wcqe)
  12996. {
  12997. struct lpfc_iocbq *irspiocbq;
  12998. unsigned long iflags;
  12999. struct lpfc_sli_ring *pring = cq->pring;
  13000. int txq_cnt = 0;
  13001. int txcmplq_cnt = 0;
  13002. /* Check for response status */
  13003. if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
  13004. /* Log the error status */
  13005. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  13006. "0357 ELS CQE error: status=x%x: "
  13007. "CQE: %08x %08x %08x %08x\n",
  13008. bf_get(lpfc_wcqe_c_status, wcqe),
  13009. wcqe->word0, wcqe->total_data_placed,
  13010. wcqe->parameter, wcqe->word3);
  13011. }
  13012. /* Get an irspiocbq for later ELS response processing use */
  13013. irspiocbq = lpfc_sli_get_iocbq(phba);
  13014. if (!irspiocbq) {
  13015. if (!list_empty(&pring->txq))
  13016. txq_cnt++;
  13017. if (!list_empty(&pring->txcmplq))
  13018. txcmplq_cnt++;
  13019. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13020. "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
  13021. "els_txcmplq_cnt=%d\n",
  13022. txq_cnt, phba->iocb_cnt,
  13023. txcmplq_cnt);
  13024. return false;
  13025. }
  13026. /* Save off the slow-path queue event for work thread to process */
  13027. memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
  13028. spin_lock_irqsave(&phba->hbalock, iflags);
  13029. list_add_tail(&irspiocbq->cq_event.list,
  13030. &phba->sli4_hba.sp_queue_event);
  13031. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13032. set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
  13033. return true;
  13034. }
  13035. /**
  13036. * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
  13037. * @phba: Pointer to HBA context object.
  13038. * @wcqe: Pointer to work-queue completion queue entry.
  13039. *
13040. * This routine handles a slow-path WQ entry consumed event by invoking the
  13041. * proper WQ release routine to the slow-path WQ.
  13042. **/
  13043. static void
  13044. lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
  13045. struct lpfc_wcqe_release *wcqe)
  13046. {
  13047. /* sanity check on queue memory */
  13048. if (unlikely(!phba->sli4_hba.els_wq))
  13049. return;
  13050. /* Check for the slow-path ELS work queue */
  13051. if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
  13052. lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
  13053. bf_get(lpfc_wcqe_r_wqe_index, wcqe));
  13054. else
  13055. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  13056. "2579 Slow-path wqe consume event carries "
  13057. "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
  13058. bf_get(lpfc_wcqe_r_wqe_index, wcqe),
  13059. phba->sli4_hba.els_wq->queue_id);
  13060. }
  13061. /**
13062. * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
  13063. * @phba: Pointer to HBA context object.
  13064. * @cq: Pointer to a WQ completion queue.
  13065. * @wcqe: Pointer to work-queue completion queue entry.
  13066. *
  13067. * This routine handles an XRI abort event.
  13068. *
  13069. * Return: true if work posted to worker thread, otherwise false.
  13070. **/
  13071. static bool
  13072. lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
  13073. struct lpfc_queue *cq,
  13074. struct sli4_wcqe_xri_aborted *wcqe)
  13075. {
  13076. bool workposted = false;
  13077. struct lpfc_cq_event *cq_event;
  13078. unsigned long iflags;
  13079. switch (cq->subtype) {
  13080. case LPFC_IO:
  13081. lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
  13082. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  13083. /* Notify aborted XRI for NVME work queue */
  13084. if (phba->nvmet_support)
  13085. lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
  13086. }
  13087. workposted = false;
  13088. break;
  13089. case LPFC_NVME_LS: /* NVME LS uses ELS resources */
  13090. case LPFC_ELS:
  13091. cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
  13092. if (!cq_event) {
  13093. workposted = false;
  13094. break;
  13095. }
  13096. cq_event->hdwq = cq->hdwq;
  13097. spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
  13098. iflags);
  13099. list_add_tail(&cq_event->list,
  13100. &phba->sli4_hba.sp_els_xri_aborted_work_queue);
  13101. /* Set the els xri abort event flag */
  13102. set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
  13103. spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
  13104. iflags);
  13105. workposted = true;
  13106. break;
  13107. default:
  13108. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13109. "0603 Invalid CQ subtype %d: "
  13110. "%08x %08x %08x %08x\n",
  13111. cq->subtype, wcqe->word0, wcqe->parameter,
  13112. wcqe->word2, wcqe->word3);
  13113. workposted = false;
  13114. break;
  13115. }
  13116. return workposted;
  13117. }
  13118. #define FC_RCTL_MDS_DIAGS 0xF4
  13119. /**
  13120. * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
  13121. * @phba: Pointer to HBA context object.
  13122. * @rcqe: Pointer to receive-queue completion queue entry.
  13123. *
13124. * This routine processes a receive-queue completion queue entry.
  13125. *
  13126. * Return: true if work posted to worker thread, otherwise false.
  13127. **/
  13128. static bool
  13129. lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
  13130. {
  13131. bool workposted = false;
  13132. struct fc_frame_header *fc_hdr;
  13133. struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
  13134. struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
  13135. struct lpfc_nvmet_tgtport *tgtp;
  13136. struct hbq_dmabuf *dma_buf;
  13137. uint32_t status, rq_id;
  13138. unsigned long iflags;
  13139. /* sanity check on queue memory */
  13140. if (unlikely(!hrq) || unlikely(!drq))
  13141. return workposted;
  13142. if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
  13143. rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
  13144. else
  13145. rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
  13146. if (rq_id != hrq->queue_id)
  13147. goto out;
  13148. status = bf_get(lpfc_rcqe_status, rcqe);
  13149. switch (status) {
  13150. case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
  13151. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13152. "2537 Receive Frame Truncated!!\n");
  13153. fallthrough;
  13154. case FC_STATUS_RQ_SUCCESS:
  13155. spin_lock_irqsave(&phba->hbalock, iflags);
  13156. lpfc_sli4_rq_release(hrq, drq);
  13157. dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
  13158. if (!dma_buf) {
  13159. hrq->RQ_no_buf_found++;
  13160. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13161. goto out;
  13162. }
  13163. hrq->RQ_rcv_buf++;
  13164. hrq->RQ_buf_posted--;
  13165. memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
  13166. fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
  13167. if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
  13168. fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
  13169. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13170. /* Handle MDS Loopback frames */
  13171. if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
  13172. lpfc_sli4_handle_mds_loopback(phba->pport,
  13173. dma_buf);
  13174. else
  13175. lpfc_in_buf_free(phba, &dma_buf->dbuf);
  13176. break;
  13177. }
  13178. /* save off the frame for the work thread to process */
  13179. list_add_tail(&dma_buf->cq_event.list,
  13180. &phba->sli4_hba.sp_queue_event);
  13181. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13182. /* Frame received */
  13183. set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
  13184. workposted = true;
  13185. break;
  13186. case FC_STATUS_INSUFF_BUF_FRM_DISC:
  13187. if (phba->nvmet_support) {
  13188. tgtp = phba->targetport->private;
  13189. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13190. "6402 RQE Error x%x, posted %d err_cnt "
  13191. "%d: %x %x %x\n",
  13192. status, hrq->RQ_buf_posted,
  13193. hrq->RQ_no_posted_buf,
  13194. atomic_read(&tgtp->rcv_fcp_cmd_in),
  13195. atomic_read(&tgtp->rcv_fcp_cmd_out),
  13196. atomic_read(&tgtp->xmt_fcp_release));
  13197. }
  13198. fallthrough;
  13199. case FC_STATUS_INSUFF_BUF_NEED_BUF:
  13200. hrq->RQ_no_posted_buf++;
  13201. /* Post more buffers if possible */
  13202. set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag);
  13203. workposted = true;
  13204. break;
  13205. case FC_STATUS_RQ_DMA_FAILURE:
  13206. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13207. "2564 RQE DMA Error x%x, x%08x x%08x x%08x "
  13208. "x%08x\n",
  13209. status, rcqe->word0, rcqe->word1,
  13210. rcqe->word2, rcqe->word3);
  13211. /* If IV set, no further recovery */
  13212. if (bf_get(lpfc_rcqe_iv, rcqe))
  13213. break;
  13214. /* recycle consumed resource */
  13215. spin_lock_irqsave(&phba->hbalock, iflags);
  13216. lpfc_sli4_rq_release(hrq, drq);
  13217. dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
  13218. if (!dma_buf) {
  13219. hrq->RQ_no_buf_found++;
  13220. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13221. break;
  13222. }
  13223. hrq->RQ_rcv_buf++;
  13224. hrq->RQ_buf_posted--;
  13225. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13226. lpfc_in_buf_free(phba, &dma_buf->dbuf);
  13227. break;
  13228. default:
  13229. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13230. "2565 Unexpected RQE Status x%x, w0-3 x%08x "
  13231. "x%08x x%08x x%08x\n",
  13232. status, rcqe->word0, rcqe->word1,
  13233. rcqe->word2, rcqe->word3);
  13234. break;
  13235. }
  13236. out:
  13237. return workposted;
  13238. }
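/*
 * The FC_STATUS_RQ_SUCCESS arm above does not parse the frame in interrupt
 * context; it hands the buffer to the worker thread. A minimal sketch of
 * that hand-off pattern, using the same fields as the code above:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_add_tail(&dma_buf->cq_event.list,
 *		      &phba->sli4_hba.sp_queue_event);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
 *	workposted = true;
 *
 * Returning "workposted" true is what tells the caller that
 * lpfc_worker_wake_up() must be invoked once CQ processing completes.
 */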
  13239. /**
  13240. * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
  13241. * @phba: Pointer to HBA context object.
  13242. * @cq: Pointer to the completion queue.
  13243. * @cqe: Pointer to a completion queue entry.
  13244. *
13245. * This routine processes a slow-path work-queue or receive-queue completion
13246. * queue entry.
  13247. *
  13248. * Return: true if work posted to worker thread, otherwise false.
  13249. **/
  13250. static bool
  13251. lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  13252. struct lpfc_cqe *cqe)
  13253. {
  13254. struct lpfc_cqe cqevt;
  13255. bool workposted = false;
  13256. /* Copy the work queue CQE and convert endian order if needed */
  13257. lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
  13258. /* Check and process for different type of WCQE and dispatch */
  13259. switch (bf_get(lpfc_cqe_code, &cqevt)) {
  13260. case CQE_CODE_COMPL_WQE:
  13261. /* Process the WQ/RQ complete event */
  13262. phba->last_completion_time = jiffies;
  13263. workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
  13264. (struct lpfc_wcqe_complete *)&cqevt);
  13265. break;
  13266. case CQE_CODE_RELEASE_WQE:
  13267. /* Process the WQ release event */
  13268. lpfc_sli4_sp_handle_rel_wcqe(phba,
  13269. (struct lpfc_wcqe_release *)&cqevt);
  13270. break;
  13271. case CQE_CODE_XRI_ABORTED:
  13272. /* Process the WQ XRI abort event */
  13273. phba->last_completion_time = jiffies;
  13274. workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
  13275. (struct sli4_wcqe_xri_aborted *)&cqevt);
  13276. break;
  13277. case CQE_CODE_RECEIVE:
  13278. case CQE_CODE_RECEIVE_V1:
  13279. /* Process the RQ event */
  13280. phba->last_completion_time = jiffies;
  13281. workposted = lpfc_sli4_sp_handle_rcqe(phba,
  13282. (struct lpfc_rcqe *)&cqevt);
  13283. break;
  13284. default:
  13285. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13286. "0388 Not a valid WCQE code: x%x\n",
  13287. bf_get(lpfc_cqe_code, &cqevt));
  13288. break;
  13289. }
  13290. return workposted;
  13291. }
  13292. /**
  13293. * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
  13294. * @phba: Pointer to HBA context object.
  13295. * @eqe: Pointer to fast-path event queue entry.
  13296. * @speq: Pointer to slow-path event queue.
  13297. *
13298. * This routine processes an event queue entry from the slow-path event queue.
13299. * It checks the MajorCode and MinorCode to determine whether this is a
13300. * completion event on a completion queue; if not, an error is logged and the
13301. * routine just returns. Otherwise, it gets the corresponding completion
13302. * queue, processes all the entries on that completion queue, rearms the
13303. * completion queue, and then returns.
  13304. *
  13305. **/
  13306. static void
  13307. lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
  13308. struct lpfc_queue *speq)
  13309. {
  13310. struct lpfc_queue *cq = NULL, *childq;
  13311. uint16_t cqid;
  13312. int ret = 0;
  13313. /* Get the reference to the corresponding CQ */
  13314. cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
  13315. list_for_each_entry(childq, &speq->child_list, list) {
  13316. if (childq->queue_id == cqid) {
  13317. cq = childq;
  13318. break;
  13319. }
  13320. }
  13321. if (unlikely(!cq)) {
  13322. if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
  13323. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13324. "0365 Slow-path CQ identifier "
  13325. "(%d) does not exist\n", cqid);
  13326. return;
  13327. }
  13328. /* Save EQ associated with this CQ */
  13329. cq->assoc_qp = speq;
  13330. if (is_kdump_kernel())
  13331. ret = queue_work(phba->wq, &cq->spwork);
  13332. else
  13333. ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
  13334. if (!ret)
  13335. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13336. "0390 Cannot schedule queue work "
  13337. "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
  13338. cqid, cq->queue_id, raw_smp_processor_id());
  13339. }
  13340. /**
  13341. * __lpfc_sli4_process_cq - Process elements of a CQ
  13342. * @phba: Pointer to HBA context object.
  13343. * @cq: Pointer to CQ to be processed
  13344. * @handler: Routine to process each cqe
  13345. * @delay: Pointer to usdelay to set in case of rescheduling of the handler
  13346. *
  13347. * This routine processes completion queue entries in a CQ. While a valid
  13348. * queue element is found, the handler is called. During processing checks
  13349. * are made for periodic doorbell writes to let the hardware know of
  13350. * element consumption.
  13351. *
  13352. * If the max limit on cqes to process is hit, or there are no more valid
  13353. * entries, the loop stops. If we processed a sufficient number of elements,
  13354. * meaning there is sufficient load, rather than rearming and generating
  13355. * another interrupt, a cq rescheduling delay will be set. A delay of 0
  13356. * indicates no rescheduling.
  13357. *
  13358. * Returns True if work scheduled, False otherwise.
  13359. **/
  13360. static bool
  13361. __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
  13362. bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
  13363. struct lpfc_cqe *), unsigned long *delay)
  13364. {
  13365. struct lpfc_cqe *cqe;
  13366. bool workposted = false;
  13367. int count = 0, consumed = 0;
  13368. bool arm = true;
  13369. /* default - no reschedule */
  13370. *delay = 0;
  13371. if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
  13372. goto rearm_and_exit;
  13373. /* Process all the entries to the CQ */
  13374. cq->q_flag = 0;
  13375. cqe = lpfc_sli4_cq_get(cq);
  13376. while (cqe) {
  13377. workposted |= handler(phba, cq, cqe);
  13378. __lpfc_sli4_consume_cqe(phba, cq, cqe);
  13379. consumed++;
  13380. if (!(++count % cq->max_proc_limit))
  13381. break;
  13382. if (!(count % cq->notify_interval)) {
  13383. phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
  13384. LPFC_QUEUE_NOARM);
  13385. consumed = 0;
  13386. cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
  13387. }
  13388. if (count == LPFC_NVMET_CQ_NOTIFY)
  13389. cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
  13390. cqe = lpfc_sli4_cq_get(cq);
  13391. }
  13392. if (count >= phba->cfg_cq_poll_threshold) {
  13393. *delay = 1;
  13394. arm = false;
  13395. }
  13396. /* Track the max number of CQEs processed in 1 EQ */
  13397. if (count > cq->CQ_max_cqe)
  13398. cq->CQ_max_cqe = count;
  13399. cq->assoc_qp->EQ_cqe_cnt += count;
  13400. /* Catch the no cq entry condition */
  13401. if (unlikely(count == 0))
  13402. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  13403. "0369 No entry from completion queue "
  13404. "qid=%d\n", cq->queue_id);
  13405. xchg(&cq->queue_claimed, 0);
  13406. rearm_and_exit:
  13407. phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
  13408. arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
  13409. return workposted;
  13410. }
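/*
 * The cmpxchg()/xchg() pair on cq->queue_claimed above acts as a lock-free,
 * single-owner guard: only one context walks the CQ at a time, and a loser
 * of the race simply rearms the CQ and exits. A condensed sketch of the
 * pattern (illustrative only):
 *
 *	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
 *		goto rearm_and_exit;		// someone else owns the CQ
 *	... consume CQEs, ringing the doorbell periodically ...
 *	xchg(&cq->queue_claimed, 0);		// release ownership
 */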
  13411. /**
  13412. * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
  13413. * @cq: pointer to CQ to process
  13414. *
  13415. * This routine calls the cq processing routine with a handler specific
  13416. * to the type of queue bound to it.
  13417. *
  13418. * The CQ routine returns two values: the first is the calling status,
  13419. * which indicates whether work was queued to the background discovery
13420. * thread. If true, the routine should wake up the discovery thread;
  13421. * the second is the delay parameter. If non-zero, rather than rearming
  13422. * the CQ and yet another interrupt, the CQ handler should be queued so
  13423. * that it is processed in a subsequent polling action. The value of
  13424. * the delay indicates when to reschedule it.
  13425. **/
  13426. static void
  13427. __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
  13428. {
  13429. struct lpfc_hba *phba = cq->phba;
  13430. unsigned long delay;
  13431. bool workposted = false;
  13432. int ret = 0;
  13433. /* Process and rearm the CQ */
  13434. switch (cq->type) {
  13435. case LPFC_MCQ:
  13436. workposted |= __lpfc_sli4_process_cq(phba, cq,
  13437. lpfc_sli4_sp_handle_mcqe,
  13438. &delay);
  13439. break;
  13440. case LPFC_WCQ:
  13441. if (cq->subtype == LPFC_IO)
  13442. workposted |= __lpfc_sli4_process_cq(phba, cq,
  13443. lpfc_sli4_fp_handle_cqe,
  13444. &delay);
  13445. else
  13446. workposted |= __lpfc_sli4_process_cq(phba, cq,
  13447. lpfc_sli4_sp_handle_cqe,
  13448. &delay);
  13449. break;
  13450. default:
  13451. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13452. "0370 Invalid completion queue type (%d)\n",
  13453. cq->type);
  13454. return;
  13455. }
  13456. if (delay) {
  13457. if (is_kdump_kernel())
  13458. ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
  13459. delay);
  13460. else
  13461. ret = queue_delayed_work_on(cq->chann, phba->wq,
  13462. &cq->sched_spwork, delay);
  13463. if (!ret)
  13464. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13465. "0394 Cannot schedule queue work "
  13466. "for cqid=%d on CPU %d\n",
  13467. cq->queue_id, cq->chann);
  13468. }
  13469. /* wake up worker thread if there are works to be done */
  13470. if (workposted)
  13471. lpfc_worker_wake_up(phba);
  13472. }
  13473. /**
  13474. * lpfc_sli4_sp_process_cq - slow-path work handler when started by
  13475. * interrupt
  13476. * @work: pointer to work element
  13477. *
  13478. * translates from the work handler and calls the slow-path handler.
  13479. **/
  13480. static void
  13481. lpfc_sli4_sp_process_cq(struct work_struct *work)
  13482. {
  13483. struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
  13484. __lpfc_sli4_sp_process_cq(cq);
  13485. }
  13486. /**
  13487. * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
  13488. * @work: pointer to work element
  13489. *
  13490. * translates from the work handler and calls the slow-path handler.
  13491. **/
  13492. static void
  13493. lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
  13494. {
  13495. struct lpfc_queue *cq = container_of(to_delayed_work(work),
  13496. struct lpfc_queue, sched_spwork);
  13497. __lpfc_sli4_sp_process_cq(cq);
  13498. }
  13499. /**
  13500. * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
  13501. * @phba: Pointer to HBA context object.
  13502. * @cq: Pointer to associated CQ
  13503. * @wcqe: Pointer to work-queue completion queue entry.
  13504. *
13505. * This routine processes a fast-path work queue completion entry from the
13506. * fast-path event queue for FCP command response completion.
  13507. **/
  13508. static void
  13509. lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  13510. struct lpfc_wcqe_complete *wcqe)
  13511. {
  13512. struct lpfc_sli_ring *pring = cq->pring;
  13513. struct lpfc_iocbq *cmdiocbq;
  13514. unsigned long iflags;
  13515. /* Check for response status */
  13516. if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
  13517. /* If resource errors reported from HBA, reduce queue
  13518. * depth of the SCSI device.
  13519. */
  13520. if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
  13521. IOSTAT_LOCAL_REJECT)) &&
  13522. ((wcqe->parameter & IOERR_PARAM_MASK) ==
  13523. IOERR_NO_RESOURCES))
  13524. phba->lpfc_rampdown_queue_depth(phba);
  13525. /* Log the cmpl status */
  13526. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  13527. "0373 FCP CQE cmpl: status=x%x: "
  13528. "CQE: %08x %08x %08x %08x\n",
  13529. bf_get(lpfc_wcqe_c_status, wcqe),
  13530. wcqe->word0, wcqe->total_data_placed,
  13531. wcqe->parameter, wcqe->word3);
  13532. }
  13533. /* Look up the FCP command IOCB and create pseudo response IOCB */
  13534. spin_lock_irqsave(&pring->ring_lock, iflags);
  13535. pring->stats.iocb_event++;
  13536. cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
  13537. bf_get(lpfc_wcqe_c_request_tag, wcqe));
  13538. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  13539. if (unlikely(!cmdiocbq)) {
  13540. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  13541. "0374 FCP complete with no corresponding "
  13542. "cmdiocb: iotag (%d)\n",
  13543. bf_get(lpfc_wcqe_c_request_tag, wcqe));
  13544. return;
  13545. }
  13546. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  13547. cmdiocbq->isr_timestamp = cq->isr_timestamp;
  13548. #endif
  13549. if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
  13550. spin_lock_irqsave(&phba->hbalock, iflags);
  13551. cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
  13552. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13553. }
  13554. if (cmdiocbq->cmd_cmpl) {
  13555. /* For FCP the flag is cleared in cmd_cmpl */
  13556. if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
  13557. cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
  13558. spin_lock_irqsave(&phba->hbalock, iflags);
  13559. cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  13560. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13561. }
  13562. /* Pass the cmd_iocb and the wcqe to the upper layer */
  13563. memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
  13564. sizeof(struct lpfc_wcqe_complete));
  13565. cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
  13566. } else {
  13567. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  13568. "0375 FCP cmdiocb not callback function "
  13569. "iotag: (%d)\n",
  13570. bf_get(lpfc_wcqe_c_request_tag, wcqe));
  13571. }
  13572. }
  13573. /**
  13574. * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
  13575. * @phba: Pointer to HBA context object.
  13576. * @cq: Pointer to completion queue.
  13577. * @wcqe: Pointer to work-queue completion queue entry.
  13578. *
13579. * This routine handles a fast-path WQ entry consumed event by invoking the
  13580. * proper WQ release routine to the slow-path WQ.
  13581. **/
  13582. static void
  13583. lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  13584. struct lpfc_wcqe_release *wcqe)
  13585. {
  13586. struct lpfc_queue *childwq;
  13587. bool wqid_matched = false;
  13588. uint16_t hba_wqid;
  13589. /* Check for fast-path FCP work queue release */
  13590. hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
  13591. list_for_each_entry(childwq, &cq->child_list, list) {
  13592. if (childwq->queue_id == hba_wqid) {
  13593. lpfc_sli4_wq_release(childwq,
  13594. bf_get(lpfc_wcqe_r_wqe_index, wcqe));
  13595. if (childwq->q_flag & HBA_NVMET_WQFULL)
  13596. lpfc_nvmet_wqfull_process(phba, childwq);
  13597. wqid_matched = true;
  13598. break;
  13599. }
  13600. }
  13601. /* Report warning log message if no match found */
  13602. if (wqid_matched != true)
  13603. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  13604. "2580 Fast-path wqe consume event carries "
  13605. "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
  13606. }
  13607. /**
  13608. * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
  13609. * @phba: Pointer to HBA context object.
  13610. * @cq: Pointer to completion queue.
  13611. * @rcqe: Pointer to receive-queue completion queue entry.
  13612. *
13613. * This routine processes a receive-queue completion queue entry.
  13614. *
  13615. * Return: true if work posted to worker thread, otherwise false.
  13616. **/
  13617. static bool
  13618. lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  13619. struct lpfc_rcqe *rcqe)
  13620. {
  13621. bool workposted = false;
  13622. struct lpfc_queue *hrq;
  13623. struct lpfc_queue *drq;
  13624. struct rqb_dmabuf *dma_buf;
  13625. struct fc_frame_header *fc_hdr;
  13626. struct lpfc_nvmet_tgtport *tgtp;
  13627. uint32_t status, rq_id;
  13628. unsigned long iflags;
  13629. uint32_t fctl, idx;
  13630. if ((phba->nvmet_support == 0) ||
  13631. (phba->sli4_hba.nvmet_cqset == NULL))
  13632. return workposted;
  13633. idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
  13634. hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
  13635. drq = phba->sli4_hba.nvmet_mrq_data[idx];
  13636. /* sanity check on queue memory */
  13637. if (unlikely(!hrq) || unlikely(!drq))
  13638. return workposted;
  13639. if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
  13640. rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
  13641. else
  13642. rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
  13643. if ((phba->nvmet_support == 0) ||
  13644. (rq_id != hrq->queue_id))
  13645. return workposted;
  13646. status = bf_get(lpfc_rcqe_status, rcqe);
  13647. switch (status) {
  13648. case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
  13649. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13650. "6126 Receive Frame Truncated!!\n");
  13651. fallthrough;
  13652. case FC_STATUS_RQ_SUCCESS:
  13653. spin_lock_irqsave(&phba->hbalock, iflags);
  13654. lpfc_sli4_rq_release(hrq, drq);
  13655. dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
  13656. if (!dma_buf) {
  13657. hrq->RQ_no_buf_found++;
  13658. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13659. goto out;
  13660. }
  13661. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13662. hrq->RQ_rcv_buf++;
  13663. hrq->RQ_buf_posted--;
  13664. fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
  13665. /* Just some basic sanity checks on FCP Command frame */
  13666. fctl = (fc_hdr->fh_f_ctl[0] << 16 |
  13667. fc_hdr->fh_f_ctl[1] << 8 |
  13668. fc_hdr->fh_f_ctl[2]);
  13669. if (((fctl &
  13670. (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
  13671. (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
  13672. (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
  13673. goto drop;
  13674. if (fc_hdr->fh_type == FC_TYPE_FCP) {
  13675. dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
  13676. lpfc_nvmet_unsol_fcp_event(
  13677. phba, idx, dma_buf, cq->isr_timestamp,
  13678. cq->q_flag & HBA_NVMET_CQ_NOTIFY);
  13679. return false;
  13680. }
  13681. drop:
  13682. lpfc_rq_buf_free(phba, &dma_buf->hbuf);
  13683. break;
  13684. case FC_STATUS_INSUFF_BUF_FRM_DISC:
  13685. if (phba->nvmet_support) {
  13686. tgtp = phba->targetport->private;
  13687. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13688. "6401 RQE Error x%x, posted %d err_cnt "
  13689. "%d: %x %x %x\n",
  13690. status, hrq->RQ_buf_posted,
  13691. hrq->RQ_no_posted_buf,
  13692. atomic_read(&tgtp->rcv_fcp_cmd_in),
  13693. atomic_read(&tgtp->rcv_fcp_cmd_out),
  13694. atomic_read(&tgtp->xmt_fcp_release));
  13695. }
  13696. fallthrough;
  13697. case FC_STATUS_INSUFF_BUF_NEED_BUF:
  13698. hrq->RQ_no_posted_buf++;
  13699. /* Post more buffers if possible */
  13700. break;
  13701. case FC_STATUS_RQ_DMA_FAILURE:
  13702. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13703. "2575 RQE DMA Error x%x, x%08x x%08x x%08x "
  13704. "x%08x\n",
  13705. status, rcqe->word0, rcqe->word1,
  13706. rcqe->word2, rcqe->word3);
  13707. /* If IV set, no further recovery */
  13708. if (bf_get(lpfc_rcqe_iv, rcqe))
  13709. break;
  13710. /* recycle consumed resource */
  13711. spin_lock_irqsave(&phba->hbalock, iflags);
  13712. lpfc_sli4_rq_release(hrq, drq);
  13713. dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
  13714. if (!dma_buf) {
  13715. hrq->RQ_no_buf_found++;
  13716. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13717. break;
  13718. }
  13719. hrq->RQ_rcv_buf++;
  13720. hrq->RQ_buf_posted--;
  13721. spin_unlock_irqrestore(&phba->hbalock, iflags);
  13722. lpfc_rq_buf_free(phba, &dma_buf->hbuf);
  13723. break;
  13724. default:
  13725. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13726. "2576 Unexpected RQE Status x%x, w0-3 x%08x "
  13727. "x%08x x%08x x%08x\n",
  13728. status, rcqe->word0, rcqe->word1,
  13729. rcqe->word2, rcqe->word3);
  13730. break;
  13731. }
  13732. out:
  13733. return workposted;
  13734. }
  13735. /**
  13736. * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
  13737. * @phba: adapter with cq
  13738. * @cq: Pointer to the completion queue.
  13739. * @cqe: Pointer to fast-path completion queue entry.
  13740. *
13741. * This routine processes a fast-path work queue completion entry from the
13742. * fast-path event queue for FCP command response completion.
  13743. *
  13744. * Return: true if work posted to worker thread, otherwise false.
  13745. **/
  13746. static bool
  13747. lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  13748. struct lpfc_cqe *cqe)
  13749. {
  13750. struct lpfc_wcqe_release wcqe;
  13751. bool workposted = false;
  13752. /* Copy the work queue CQE and convert endian order if needed */
  13753. lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
  13754. /* Check and process for different type of WCQE and dispatch */
  13755. switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
  13756. case CQE_CODE_COMPL_WQE:
  13757. case CQE_CODE_NVME_ERSP:
  13758. cq->CQ_wq++;
  13759. /* Process the WQ complete event */
  13760. phba->last_completion_time = jiffies;
  13761. if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
  13762. lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
  13763. (struct lpfc_wcqe_complete *)&wcqe);
  13764. break;
  13765. case CQE_CODE_RELEASE_WQE:
  13766. cq->CQ_release_wqe++;
  13767. /* Process the WQ release event */
  13768. lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
  13769. (struct lpfc_wcqe_release *)&wcqe);
  13770. break;
  13771. case CQE_CODE_XRI_ABORTED:
  13772. cq->CQ_xri_aborted++;
  13773. /* Process the WQ XRI abort event */
  13774. phba->last_completion_time = jiffies;
  13775. workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
  13776. (struct sli4_wcqe_xri_aborted *)&wcqe);
  13777. break;
  13778. case CQE_CODE_RECEIVE_V1:
  13779. case CQE_CODE_RECEIVE:
  13780. phba->last_completion_time = jiffies;
  13781. if (cq->subtype == LPFC_NVMET) {
  13782. workposted = lpfc_sli4_nvmet_handle_rcqe(
  13783. phba, cq, (struct lpfc_rcqe *)&wcqe);
  13784. }
  13785. break;
  13786. default:
  13787. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13788. "0144 Not a valid CQE code: x%x\n",
  13789. bf_get(lpfc_wcqe_c_code, &wcqe));
  13790. break;
  13791. }
  13792. return workposted;
  13793. }
  13794. /**
  13795. * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
  13796. * @cq: Pointer to CQ to be processed
  13797. *
  13798. * This routine calls the cq processing routine with the handler for
  13799. * fast path CQEs.
  13800. *
  13801. * The CQ routine returns two values: the first is the calling status,
  13802. * which indicates whether work was queued to the background discovery
13803. * thread. If true, the routine should wake up the discovery thread;
  13804. * the second is the delay parameter. If non-zero, rather than rearming
  13805. * the CQ and yet another interrupt, the CQ handler should be queued so
  13806. * that it is processed in a subsequent polling action. The value of
  13807. * the delay indicates when to reschedule it.
  13808. **/
  13809. static void
  13810. __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
  13811. {
  13812. struct lpfc_hba *phba = cq->phba;
  13813. unsigned long delay;
  13814. bool workposted = false;
  13815. int ret;
  13816. /* process and rearm the CQ */
  13817. workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
  13818. &delay);
  13819. if (delay) {
  13820. if (is_kdump_kernel())
  13821. ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
  13822. delay);
  13823. else
  13824. ret = queue_delayed_work_on(cq->chann, phba->wq,
  13825. &cq->sched_irqwork, delay);
  13826. if (!ret)
  13827. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13828. "0367 Cannot schedule queue work "
  13829. "for cqid=%d on CPU %d\n",
  13830. cq->queue_id, cq->chann);
  13831. }
  13832. /* wake up worker thread if there are works to be done */
  13833. if (workposted)
  13834. lpfc_worker_wake_up(phba);
  13835. }
  13836. /**
  13837. * lpfc_sli4_hba_process_cq - fast-path work handler when started by
  13838. * interrupt
  13839. * @work: pointer to work element
  13840. *
  13841. * translates from the work handler and calls the fast-path handler.
  13842. **/
  13843. static void
  13844. lpfc_sli4_hba_process_cq(struct work_struct *work)
  13845. {
  13846. struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
  13847. __lpfc_sli4_hba_process_cq(cq);
  13848. }
  13849. /**
  13850. * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
  13851. * @phba: Pointer to HBA context object.
  13852. * @eq: Pointer to the queue structure.
  13853. * @eqe: Pointer to fast-path event queue entry.
  13854. * @poll_mode: poll_mode to execute processing the cq.
  13855. *
13856. * This routine processes an event queue entry from the fast-path event queue.
13857. * It checks the MajorCode and MinorCode to determine whether this is a
13858. * completion event on a completion queue; if not, an error is logged and the
13859. * routine just returns. Otherwise, it gets the corresponding completion
13860. * queue, processes all the entries on the completion queue, rearms the
13861. * completion queue, and then returns.
  13862. **/
  13863. static void
  13864. lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
  13865. struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
  13866. {
  13867. struct lpfc_queue *cq = NULL;
  13868. uint32_t qidx = eq->hdwq;
  13869. uint16_t cqid, id;
  13870. int ret;
  13871. if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
  13872. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13873. "0366 Not a valid completion "
  13874. "event: majorcode=x%x, minorcode=x%x\n",
  13875. bf_get_le32(lpfc_eqe_major_code, eqe),
  13876. bf_get_le32(lpfc_eqe_minor_code, eqe));
  13877. return;
  13878. }
  13879. /* Get the reference to the corresponding CQ */
  13880. cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
  13881. /* Use the fast lookup method first */
  13882. if (cqid <= phba->sli4_hba.cq_max) {
  13883. cq = phba->sli4_hba.cq_lookup[cqid];
  13884. if (cq)
  13885. goto work_cq;
  13886. }
  13887. /* Next check for NVMET completion */
  13888. if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
  13889. id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
  13890. if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
  13891. /* Process NVMET unsol rcv */
  13892. cq = phba->sli4_hba.nvmet_cqset[cqid - id];
  13893. goto process_cq;
  13894. }
  13895. }
  13896. if (phba->sli4_hba.nvmels_cq &&
  13897. (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
  13898. /* Process NVME unsol rcv */
  13899. cq = phba->sli4_hba.nvmels_cq;
  13900. }
  13901. /* Otherwise this is a Slow path event */
  13902. if (cq == NULL) {
  13903. lpfc_sli4_sp_handle_eqe(phba, eqe,
  13904. phba->sli4_hba.hdwq[qidx].hba_eq);
  13905. return;
  13906. }
  13907. process_cq:
  13908. if (unlikely(cqid != cq->queue_id)) {
  13909. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13910. "0368 Miss-matched fast-path completion "
  13911. "queue identifier: eqcqid=%d, fcpcqid=%d\n",
  13912. cqid, cq->queue_id);
  13913. return;
  13914. }
  13915. work_cq:
  13916. #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
  13917. if (phba->ktime_on)
  13918. cq->isr_timestamp = ktime_get_ns();
  13919. else
  13920. cq->isr_timestamp = 0;
  13921. #endif
  13922. switch (poll_mode) {
  13923. case LPFC_THREADED_IRQ:
  13924. __lpfc_sli4_hba_process_cq(cq);
  13925. break;
  13926. case LPFC_QUEUE_WORK:
  13927. default:
  13928. if (is_kdump_kernel())
  13929. ret = queue_work(phba->wq, &cq->irqwork);
  13930. else
  13931. ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
  13932. if (!ret)
  13933. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  13934. "0383 Cannot schedule queue work "
  13935. "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
  13936. cqid, cq->queue_id,
  13937. raw_smp_processor_id());
  13938. break;
  13939. }
  13940. }
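/*
 * lpfc_sli4_hba_handle_eqe() resolves the CQ for a given cqid in increasing
 * order of cost: the cq_lookup[] array first (direct index, valid for ids up
 * to cq_max), then the NVMET cqset id range, then the NVME LS CQ, and only
 * then the slow path, which walks the EQ's child_list. A sketch of the fast
 * path, using the fields referenced above:
 *
 *	if (cqid <= phba->sli4_hba.cq_max)
 *		cq = phba->sli4_hba.cq_lookup[cqid];	// may still be NULL
 *	if (!cq)
 *		... fall back to the slower lookups ...
 */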
  13941. /**
  13942. * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
  13943. * @work: pointer to work element
  13944. *
  13945. * translates from the work handler and calls the fast-path handler.
  13946. **/
  13947. static void
  13948. lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
  13949. {
  13950. struct lpfc_queue *cq = container_of(to_delayed_work(work),
  13951. struct lpfc_queue, sched_irqwork);
  13952. __lpfc_sli4_hba_process_cq(cq);
  13953. }
  13954. /**
  13955. * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
  13956. * @irq: Interrupt number.
  13957. * @dev_id: The device context pointer.
  13958. *
  13959. * This function is directly called from the PCI layer as an interrupt
  13960. * service routine when device with SLI-4 interface spec is enabled with
  13961. * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
  13962. * ring event in the HBA. However, when the device is enabled with either
  13963. * MSI or Pin-IRQ interrupt mode, this function is called as part of the
  13964. * device-level interrupt handler. When the PCI slot is in error recovery
  13965. * or the HBA is undergoing initialization, the interrupt handler will not
  13966. * process the interrupt. The SCSI FCP fast-path ring event are handled in
  13967. * the intrrupt context. This function is called without any lock held.
  13968. * It gets the hbalock to access and update SLI data structures. Note that,
  13969. * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
  13970. * equal to that of FCP CQ index.
  13971. *
  13972. * The link attention and ELS ring attention events are handled
  13973. * by the worker thread. The interrupt handler signals the worker thread
  13974. * and returns for these events. This function is called without any lock
  13975. * held. It gets the hbalock to access and update SLI data structures.
  13976. *
  13977. * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
  13978. * when interrupt is scheduled to be handled from a threaded irq context, or
  13979. * else returns IRQ_NONE.
  13980. **/
  13981. irqreturn_t
  13982. lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
  13983. {
  13984. struct lpfc_hba *phba;
  13985. struct lpfc_hba_eq_hdl *hba_eq_hdl;
  13986. struct lpfc_queue *fpeq;
  13987. unsigned long iflag;
  13988. int hba_eqidx;
  13989. int ecount = 0;
  13990. struct lpfc_eq_intr_info *eqi;
  13991. /* Get the driver's phba structure from the dev_id */
  13992. hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
  13993. phba = hba_eq_hdl->phba;
  13994. hba_eqidx = hba_eq_hdl->idx;
  13995. if (unlikely(!phba))
  13996. return IRQ_NONE;
  13997. if (unlikely(!phba->sli4_hba.hdwq))
  13998. return IRQ_NONE;
  13999. /* Get to the EQ struct associated with this vector */
  14000. fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
  14001. if (unlikely(!fpeq))
  14002. return IRQ_NONE;
  14003. /* Check device state for handling interrupt */
  14004. if (unlikely(lpfc_intr_state_check(phba))) {
  14005. /* Check again for link_state with lock held */
  14006. spin_lock_irqsave(&phba->hbalock, iflag);
  14007. if (phba->link_state < LPFC_LINK_DOWN)
  14008. /* Flush, clear interrupt, and rearm the EQ */
  14009. lpfc_sli4_eqcq_flush(phba, fpeq);
  14010. spin_unlock_irqrestore(&phba->hbalock, iflag);
  14011. return IRQ_NONE;
  14012. }
  14013. switch (fpeq->poll_mode) {
  14014. case LPFC_THREADED_IRQ:
  14015. /* CGN mgmt is mutually exclusive from irq processing */
  14016. if (phba->cmf_active_mode == LPFC_CFG_OFF)
  14017. return IRQ_WAKE_THREAD;
  14018. fallthrough;
  14019. case LPFC_QUEUE_WORK:
  14020. default:
  14021. eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
  14022. eqi->icnt++;
  14023. fpeq->last_cpu = raw_smp_processor_id();
  14024. if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
  14025. fpeq->q_flag & HBA_EQ_DELAY_CHK &&
  14026. phba->cfg_auto_imax &&
  14027. fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
  14028. phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
  14029. lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
  14030. LPFC_MAX_AUTO_EQ_DELAY);
  14031. /* process and rearm the EQ */
  14032. ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
  14033. LPFC_QUEUE_WORK);
  14034. if (unlikely(ecount == 0)) {
  14035. fpeq->EQ_no_entry++;
  14036. if (phba->intr_type == MSIX)
  14037. /* MSI-X treated interrupt served as no EQ share INT */
  14038. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  14039. "0358 MSI-X interrupt with no EQE\n");
  14040. else
  14041. /* Non MSI-X treated on interrupt as EQ share INT */
  14042. return IRQ_NONE;
  14043. }
  14044. }
  14045. return IRQ_HANDLED;
  14046. } /* lpfc_sli4_hba_intr_handler */
  14047. /**
  14048. * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
  14049. * @irq: Interrupt number.
  14050. * @dev_id: The device context pointer.
  14051. *
  14052. * This function is the device-level interrupt handler to device with SLI-4
  14053. * interface spec, called from the PCI layer when either MSI or Pin-IRQ
  14054. * interrupt mode is enabled and there is an event in the HBA which requires
  14055. * driver attention. This function invokes the slow-path interrupt attention
  14056. * handling function and fast-path interrupt attention handling function in
  14057. * turn to process the relevant HBA attention events. This function is called
  14058. * without any lock held. It gets the hbalock to access and update SLI data
  14059. * structures.
  14060. *
  14061. * This function returns IRQ_HANDLED when interrupt is handled, else it
  14062. * returns IRQ_NONE.
  14063. **/
  14064. irqreturn_t
  14065. lpfc_sli4_intr_handler(int irq, void *dev_id)
  14066. {
  14067. struct lpfc_hba *phba;
  14068. irqreturn_t hba_irq_rc;
  14069. bool hba_handled = false;
  14070. int qidx;
  14071. /* Get the driver's phba structure from the dev_id */
  14072. phba = (struct lpfc_hba *)dev_id;
  14073. if (unlikely(!phba))
  14074. return IRQ_NONE;
  14075. /*
  14076. * Invoke fast-path host attention interrupt handling as appropriate.
  14077. */
  14078. for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
  14079. hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
  14080. &phba->sli4_hba.hba_eq_hdl[qidx]);
  14081. if (hba_irq_rc == IRQ_HANDLED)
  14082. hba_handled |= true;
  14083. }
  14084. return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
  14085. } /* lpfc_sli4_intr_handler */
  14086. void lpfc_sli4_poll_hbtimer(struct timer_list *t)
  14087. {
  14088. struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
  14089. struct lpfc_queue *eq;
  14090. rcu_read_lock();
  14091. list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
  14092. lpfc_sli4_poll_eq(eq);
  14093. if (!list_empty(&phba->poll_list))
  14094. mod_timer(&phba->cpuhp_poll_timer,
  14095. jiffies + msecs_to_jiffies(LPFC_POLL_HB));
  14096. rcu_read_unlock();
  14097. }
  14098. static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
  14099. {
  14100. struct lpfc_hba *phba = eq->phba;
  14101. /* kickstart slowpath processing if needed */
  14102. if (list_empty(&phba->poll_list))
  14103. mod_timer(&phba->cpuhp_poll_timer,
  14104. jiffies + msecs_to_jiffies(LPFC_POLL_HB));
  14105. list_add_rcu(&eq->_poll_list, &phba->poll_list);
  14106. synchronize_rcu();
  14107. }
  14108. static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
  14109. {
  14110. struct lpfc_hba *phba = eq->phba;
14111. /* Disable slow-path processing for this eq. Kick-start the eq
14112. * by re-arming it ASAP.
  14113. */
  14114. list_del_rcu(&eq->_poll_list);
  14115. synchronize_rcu();
  14116. if (list_empty(&phba->poll_list))
  14117. del_timer_sync(&phba->cpuhp_poll_timer);
  14118. }
  14119. void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
  14120. {
  14121. struct lpfc_queue *eq, *next;
  14122. list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
  14123. list_del(&eq->_poll_list);
  14124. INIT_LIST_HEAD(&phba->poll_list);
  14125. synchronize_rcu();
  14126. }
  14127. static inline void
  14128. __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
  14129. {
  14130. if (mode == eq->mode)
  14131. return;
  14132. /*
  14133. * currently this function is only called during a hotplug
  14134. * event and the cpu on which this function is executing
  14135. * is going offline. By now the hotplug has instructed
  14136. * the scheduler to remove this cpu from cpu active mask.
14137. * So we don't need to worry about being put aside by the
14138. * scheduler for a high priority process. Yes, interrupts
14139. * could still come in, but they are known to retire ASAP.
  14140. */
  14141. /* Disable polling in the fastpath */
  14142. WRITE_ONCE(eq->mode, mode);
  14143. /* flush out the store buffer */
  14144. smp_wmb();
  14145. /*
  14146. * Add this eq to the polling list and start polling. For
  14147. * a grace period both interrupt handler and poller will
  14148. * try to process the eq _but_ that's fine. We have a
  14149. * synchronization mechanism in place (queue_claimed) to
14150. * deal with it. This is just a draining phase for the
14151. * interrupt handler (not the eq's) as we have guaranteed,
14152. * through the barrier, that all the CPUs have seen the new
14153. * CQ_POLLED state, which will effectively disable the
14154. * rearming of the EQ. The whole idea is that the EQs die
14155. * off eventually as we are not rearming EQs anymore.
  14156. */
  14157. mode ? lpfc_sli4_add_to_poll_list(eq) :
  14158. lpfc_sli4_remove_from_poll_list(eq);
  14159. }
  14160. void lpfc_sli4_start_polling(struct lpfc_queue *eq)
  14161. {
  14162. __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
  14163. }
  14164. void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
  14165. {
  14166. struct lpfc_hba *phba = eq->phba;
  14167. __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14168. /* Kick start the pending I/Os in h/w.
14169. * Once we switch back to interrupt processing on an eq,
14170. * the io path completion will only arm the eq when it
14171. * receives a completion. But since the eq is in a
14172. * disarmed state it doesn't receive a completion, which
14173. * creates a deadlock scenario.
  14174. */
  14175. phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
  14176. }
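/*
 * lpfc_sli4_start_polling()/lpfc_sli4_stop_polling() move an EQ between
 * interrupt-driven and timer-polled operation. A hedged sketch of a caller
 * (hypothetical control flow; the driver's real CPU-hotplug callbacks live
 * elsewhere in this file):
 *
 *	// CPU servicing this EQ's vector is going offline
 *	lpfc_sli4_start_polling(eq);	// now serviced by cpuhp_poll_timer
 *	...
 *	// CPU is back online
 *	lpfc_sli4_stop_polling(eq);	// rearm the EQ, back to interrupts
 */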
  14177. /**
  14178. * lpfc_sli4_queue_free - free a queue structure and associated memory
  14179. * @queue: The queue structure to free.
  14180. *
  14181. * This function frees a queue structure and the DMAable memory used for
  14182. * the host resident queue. This function must be called after destroying the
  14183. * queue on the HBA.
  14184. **/
  14185. void
  14186. lpfc_sli4_queue_free(struct lpfc_queue *queue)
  14187. {
  14188. struct lpfc_dmabuf *dmabuf;
  14189. if (!queue)
  14190. return;
  14191. if (!list_empty(&queue->wq_list))
  14192. list_del(&queue->wq_list);
  14193. while (!list_empty(&queue->page_list)) {
  14194. list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
  14195. list);
  14196. dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
  14197. dmabuf->virt, dmabuf->phys);
  14198. kfree(dmabuf);
  14199. }
  14200. if (queue->rqbp) {
  14201. lpfc_free_rq_buffer(queue->phba, queue);
  14202. kfree(queue->rqbp);
  14203. }
  14204. if (!list_empty(&queue->cpu_list))
  14205. list_del(&queue->cpu_list);
  14206. kfree(queue);
  14207. return;
  14208. }
  14209. /**
  14210. * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
  14211. * @phba: The HBA that this queue is being created on.
  14212. * @page_size: The size of a queue page
  14213. * @entry_size: The size of each queue entry for this queue.
  14214. * @entry_count: The number of entries that this queue will handle.
  14215. * @cpu: The cpu that will primarily utilize this queue.
  14216. *
  14217. * This function allocates a queue structure and the DMAable memory used for
  14218. * the host resident queue. This function must be called before creating the
  14219. * queue on the HBA.
  14220. **/
  14221. struct lpfc_queue *
  14222. lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
  14223. uint32_t entry_size, uint32_t entry_count, int cpu)
  14224. {
  14225. struct lpfc_queue *queue;
  14226. struct lpfc_dmabuf *dmabuf;
  14227. uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  14228. uint16_t x, pgcnt;
  14229. if (!phba->sli4_hba.pc_sli4_params.supported)
  14230. hw_page_size = page_size;
  14231. pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
  14232. /* If needed, Adjust page count to match the max the adapter supports */
  14233. if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
  14234. pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
  14235. queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
  14236. GFP_KERNEL, cpu_to_node(cpu));
  14237. if (!queue)
  14238. return NULL;
  14239. INIT_LIST_HEAD(&queue->list);
  14240. INIT_LIST_HEAD(&queue->_poll_list);
  14241. INIT_LIST_HEAD(&queue->wq_list);
  14242. INIT_LIST_HEAD(&queue->wqfull_list);
  14243. INIT_LIST_HEAD(&queue->page_list);
  14244. INIT_LIST_HEAD(&queue->child_list);
  14245. INIT_LIST_HEAD(&queue->cpu_list);
  14246. /* Set queue parameters now. If the system cannot provide memory
  14247. * resources, the free routine needs to know what was allocated.
  14248. */
  14249. queue->page_count = pgcnt;
  14250. queue->q_pgs = (void **)&queue[1];
  14251. queue->entry_cnt_per_pg = hw_page_size / entry_size;
  14252. queue->entry_size = entry_size;
  14253. queue->entry_count = entry_count;
  14254. queue->page_size = hw_page_size;
  14255. queue->phba = phba;
  14256. for (x = 0; x < queue->page_count; x++) {
  14257. dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
  14258. dev_to_node(&phba->pcidev->dev));
  14259. if (!dmabuf)
  14260. goto out_fail;
  14261. dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
  14262. hw_page_size, &dmabuf->phys,
  14263. GFP_KERNEL);
  14264. if (!dmabuf->virt) {
  14265. kfree(dmabuf);
  14266. goto out_fail;
  14267. }
  14268. dmabuf->buffer_tag = x;
  14269. list_add_tail(&dmabuf->list, &queue->page_list);
14270. /* use lpfc_sli4_qe to index a particular entry in this page */
  14271. queue->q_pgs[x] = dmabuf->virt;
  14272. }
  14273. INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
  14274. INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
  14275. INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
  14276. INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
  14277. /* notify_interval will be set during q creation */
  14278. return queue;
  14279. out_fail:
  14280. lpfc_sli4_queue_free(queue);
  14281. return NULL;
  14282. }
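/*
 * Worked example of the page-count math in lpfc_sli4_queue_alloc() above
 * (the numbers are illustrative only): with entry_size = 64 bytes,
 * entry_count = 1024 and a 4 KB hw_page_size,
 *
 *	pgcnt            = ALIGN(64 * 1024, 4096) / 4096 = 16 pages
 *	entry_cnt_per_pg = 4096 / 64                     = 64 entries/page
 *
 * so q_pgs[] holds 16 page pointers and lpfc_sli4_qe() can locate any of the
 * 1024 entries within those pages (see the comment in the allocation loop).
 */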
  14283. /**
  14284. * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
  14285. * @phba: HBA structure that indicates port to create a queue on.
  14286. * @pci_barset: PCI BAR set flag.
  14287. *
  14288. * This function shall perform iomap of the specified PCI BAR address to host
  14289. * memory address if not already done so and return it. The returned host
  14290. * memory address can be NULL.
  14291. */
  14292. static void __iomem *
  14293. lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
  14294. {
  14295. if (!phba->pcidev)
  14296. return NULL;
  14297. switch (pci_barset) {
  14298. case WQ_PCI_BAR_0_AND_1:
  14299. return phba->pci_bar0_memmap_p;
  14300. case WQ_PCI_BAR_2_AND_3:
  14301. return phba->pci_bar2_memmap_p;
  14302. case WQ_PCI_BAR_4_AND_5:
  14303. return phba->pci_bar4_memmap_p;
  14304. default:
  14305. break;
  14306. }
  14307. return NULL;
  14308. }
  14309. /**
  14310. * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
  14311. * @phba: HBA structure that EQs are on.
  14312. * @startq: The starting EQ index to modify
  14313. * @numq: The number of EQs (consecutive indexes) to modify
  14314. * @usdelay: amount of delay
  14315. *
  14316. * This function revises the EQ delay on 1 or more EQs. The EQ delay
  14317. * is set either by writing to a register (if supported by the SLI Port)
  14318. * or by mailbox command. The mailbox command allows several EQs to be
  14319. * updated at once.
  14320. *
  14321. * The @phba struct is used to send a mailbox command to HBA. The @startq
  14322. * is used to get the starting EQ index to change. The @numq value is
  14323. * used to specify how many consecutive EQ indexes, starting at EQ index,
14324. * are to be changed. This function is synchronous and will wait for any
14325. * mailbox commands to finish before returning.
  14326. *
14327. * This function does not return a value. Failures (for example, being
14328. * unable to allocate the mailbox buffer, or a MODIFY_EQ_DELAY mailbox
14329. * failure) are logged; note that on a mailbox failure some EQs may already
14330. * have had their delay multiplier changed.
  14331. **/
  14332. void
  14333. lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
  14334. uint32_t numq, uint32_t usdelay)
  14335. {
  14336. struct lpfc_mbx_modify_eq_delay *eq_delay;
  14337. LPFC_MBOXQ_t *mbox;
  14338. struct lpfc_queue *eq;
  14339. int cnt = 0, rc, length;
  14340. uint32_t shdr_status, shdr_add_status;
  14341. uint32_t dmult;
  14342. int qidx;
  14343. union lpfc_sli4_cfg_shdr *shdr;
  14344. if (startq >= phba->cfg_irq_chann)
  14345. return;
  14346. if (usdelay > 0xFFFF) {
  14347. lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
  14348. "6429 usdelay %d too large. Scaled down to "
  14349. "0xFFFF.\n", usdelay);
  14350. usdelay = 0xFFFF;
  14351. }
  14352. /* set values by EQ_DELAY register if supported */
  14353. if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
  14354. for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
  14355. eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
  14356. if (!eq)
  14357. continue;
  14358. lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
  14359. if (++cnt >= numq)
  14360. break;
  14361. }
  14362. return;
  14363. }
  14364. /* Otherwise, set values by mailbox cmd */
  14365. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  14366. if (!mbox) {
  14367. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14368. "6428 Failed allocating mailbox cmd buffer."
  14369. " EQ delay was not set.\n");
  14370. return;
  14371. }
  14372. length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
  14373. sizeof(struct lpfc_sli4_cfg_mhdr));
  14374. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  14375. LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
  14376. length, LPFC_SLI4_MBX_EMBED);
  14377. eq_delay = &mbox->u.mqe.un.eq_delay;
14378. /* Calculate the delay multiplier from the maximum interrupts per second */
  14379. dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
  14380. if (dmult)
  14381. dmult--;
  14382. if (dmult > LPFC_DMULT_MAX)
  14383. dmult = LPFC_DMULT_MAX;
  14384. for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
  14385. eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
  14386. if (!eq)
  14387. continue;
  14388. eq->q_mode = usdelay;
  14389. eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
  14390. eq_delay->u.request.eq[cnt].phase = 0;
  14391. eq_delay->u.request.eq[cnt].delay_multi = dmult;
  14392. if (++cnt >= numq)
  14393. break;
  14394. }
  14395. eq_delay->u.request.num_eq = cnt;
  14396. mbox->vport = phba->pport;
  14397. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  14398. mbox->ctx_ndlp = NULL;
  14399. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  14400. shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
  14401. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  14402. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  14403. if (shdr_status || shdr_add_status || rc) {
  14404. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14405. "2512 MODIFY_EQ_DELAY mailbox failed with "
  14406. "status x%x add_status x%x, mbx status x%x\n",
  14407. shdr_status, shdr_add_status, rc);
  14408. }
  14409. mempool_free(mbox, phba->mbox_mem_pool);
  14410. return;
  14411. }
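/*
 * Hypothetical usage of lpfc_modify_hba_eq_delay() above -- set every EQ to
 * a 100 usec coalescing delay (the value is illustrative only):
 *
 *	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 100);
 *
 * If the port supports the EQ_DELAY register (LPFC_SLI_USE_EQDR) each EQ is
 * updated directly; otherwise one MODIFY_EQ_DELAY mailbox command updates up
 * to "numq" EQs, with usdelay clamped to 0xFFFF and converted to a delay
 * multiplier via LPFC_DMULT_CONST / LPFC_SEC_TO_USEC.
 */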
  14412. /**
  14413. * lpfc_eq_create - Create an Event Queue on the HBA
  14414. * @phba: HBA structure that indicates port to create a queue on.
  14415. * @eq: The queue structure to use to create the event queue.
  14416. * @imax: The maximum interrupt per second limit.
  14417. *
  14418. * This function creates an event queue, as detailed in @eq, on a port,
  14419. * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
  14420. *
  14421. * The @phba struct is used to send mailbox command to HBA. The @eq struct
  14422. * is used to get the entry count and entry size that are necessary to
  14423. * determine the number of pages to allocate and use for this queue. This
  14424. * function will send the EQ_CREATE mailbox command to the HBA to setup the
14425. * event queue. This function is synchronous and will wait for the mailbox
  14426. * command to finish before continuing.
  14427. *
  14428. * On success this function will return a zero. If unable to allocate enough
  14429. * memory this function will return -ENOMEM. If the queue create mailbox command
  14430. * fails this function will return -ENXIO.
  14431. **/
  14432. int
  14433. lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
  14434. {
  14435. struct lpfc_mbx_eq_create *eq_create;
  14436. LPFC_MBOXQ_t *mbox;
  14437. int rc, length, status = 0;
  14438. struct lpfc_dmabuf *dmabuf;
  14439. uint32_t shdr_status, shdr_add_status;
  14440. union lpfc_sli4_cfg_shdr *shdr;
  14441. uint16_t dmult;
  14442. uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  14443. /* sanity check on queue memory */
  14444. if (!eq)
  14445. return -ENODEV;
  14446. if (!phba->sli4_hba.pc_sli4_params.supported)
  14447. hw_page_size = SLI4_PAGE_SIZE;
  14448. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  14449. if (!mbox)
  14450. return -ENOMEM;
  14451. length = (sizeof(struct lpfc_mbx_eq_create) -
  14452. sizeof(struct lpfc_sli4_cfg_mhdr));
  14453. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  14454. LPFC_MBOX_OPCODE_EQ_CREATE,
  14455. length, LPFC_SLI4_MBX_EMBED);
  14456. eq_create = &mbox->u.mqe.un.eq_create;
  14457. shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
  14458. bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
  14459. eq->page_count);
  14460. bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
  14461. LPFC_EQE_SIZE);
  14462. bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
  14463. /* Use version 2 of CREATE_EQ if eqav is set */
  14464. if (phba->sli4_hba.pc_sli4_params.eqav) {
  14465. bf_set(lpfc_mbox_hdr_version, &shdr->request,
  14466. LPFC_Q_CREATE_VERSION_2);
  14467. bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
  14468. phba->sli4_hba.pc_sli4_params.eqav);
  14469. }
  14470. /* don't setup delay multiplier using EQ_CREATE */
  14471. dmult = 0;
  14472. bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
  14473. dmult);
  14474. switch (eq->entry_count) {
  14475. default:
  14476. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14477. "0360 Unsupported EQ count. (%d)\n",
  14478. eq->entry_count);
  14479. if (eq->entry_count < 256) {
  14480. status = -EINVAL;
  14481. goto out;
  14482. }
  14483. fallthrough; /* otherwise default to smallest count */
  14484. case 256:
  14485. bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  14486. LPFC_EQ_CNT_256);
  14487. break;
  14488. case 512:
  14489. bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  14490. LPFC_EQ_CNT_512);
  14491. break;
  14492. case 1024:
  14493. bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  14494. LPFC_EQ_CNT_1024);
  14495. break;
  14496. case 2048:
  14497. bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  14498. LPFC_EQ_CNT_2048);
  14499. break;
  14500. case 4096:
  14501. bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
  14502. LPFC_EQ_CNT_4096);
  14503. break;
  14504. }
  14505. list_for_each_entry(dmabuf, &eq->page_list, list) {
  14506. memset(dmabuf->virt, 0, hw_page_size);
  14507. eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  14508. putPaddrLow(dmabuf->phys);
  14509. eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  14510. putPaddrHigh(dmabuf->phys);
  14511. }
  14512. mbox->vport = phba->pport;
  14513. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  14514. mbox->ctx_buf = NULL;
  14515. mbox->ctx_ndlp = NULL;
  14516. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  14517. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  14518. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  14519. if (shdr_status || shdr_add_status || rc) {
  14520. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14521. "2500 EQ_CREATE mailbox failed with "
  14522. "status x%x add_status x%x, mbx status x%x\n",
  14523. shdr_status, shdr_add_status, rc);
  14524. status = -ENXIO;
  14525. }
  14526. eq->type = LPFC_EQ;
  14527. eq->subtype = LPFC_NONE;
  14528. eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
  14529. if (eq->queue_id == 0xFFFF)
  14530. status = -ENXIO;
  14531. eq->host_index = 0;
  14532. eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
  14533. eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
  14534. out:
  14535. mempool_free(mbox, phba->mbox_mem_pool);
  14536. return status;
  14537. }
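/*
 * Sketch of the allocate/create pairing for an event queue, using
 * lpfc_sli4_queue_alloc() and lpfc_eq_create() above. Error handling is
 * elided, and the size macro/fields (LPFC_DEFAULT_PAGE_SIZE, eq_esize,
 * eq_ecount) are assumed from elsewhere in the driver:
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount, cpu);
 *	if (!eq || lpfc_eq_create(phba, eq, imax))
 *		goto fail;
 *	...
 *	// teardown: destroy the queue on the HBA first, then
 *	lpfc_sli4_queue_free(eq);
 */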
  14538. /**
  14539. * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
  14540. * @irq: Interrupt number.
  14541. * @dev_id: The device context pointer.
  14542. *
  14543. * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
  14544. * threaded irq context.
  14545. *
  14546. * Returns
  14547. * IRQ_HANDLED - interrupt is handled
  14548. * IRQ_NONE - otherwise
  14549. **/
  14550. irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
  14551. {
  14552. struct lpfc_hba *phba;
  14553. struct lpfc_hba_eq_hdl *hba_eq_hdl;
  14554. struct lpfc_queue *fpeq;
  14555. int ecount = 0;
  14556. int hba_eqidx;
  14557. struct lpfc_eq_intr_info *eqi;
  14558. /* Get the driver's phba structure from the dev_id */
  14559. hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
  14560. phba = hba_eq_hdl->phba;
  14561. hba_eqidx = hba_eq_hdl->idx;
  14562. if (unlikely(!phba))
  14563. return IRQ_NONE;
  14564. if (unlikely(!phba->sli4_hba.hdwq))
  14565. return IRQ_NONE;
  14566. /* Get to the EQ struct associated with this vector */
  14567. fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
  14568. if (unlikely(!fpeq))
  14569. return IRQ_NONE;
  14570. eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
  14571. eqi->icnt++;
  14572. fpeq->last_cpu = raw_smp_processor_id();
  14573. if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
  14574. fpeq->q_flag & HBA_EQ_DELAY_CHK &&
  14575. phba->cfg_auto_imax &&
  14576. fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
  14577. phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
  14578. lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
  14579. /* process and rearm the EQ */
  14580. ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
  14581. LPFC_THREADED_IRQ);
  14582. if (unlikely(ecount == 0)) {
  14583. fpeq->EQ_no_entry++;
  14584. if (phba->intr_type == MSIX)
14585. /* MSI-X vector fired but the EQ had no entries; just log it */
  14586. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  14587. "3358 MSI-X interrupt with no EQE\n");
  14588. else
14589. /* Non MSI-X: the line may be shared, so report the interrupt unhandled */
  14590. return IRQ_NONE;
  14591. }
  14592. return IRQ_HANDLED;
  14593. }
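/*
 * Illustrative sketch (not part of the driver): a threaded handler such as
 * lpfc_sli4_hba_intr_handler_th() is normally paired with a hard-IRQ handler
 * through request_threaded_irq().  The hard handler lpfc_sli4_hba_intr_handler
 * exists earlier in this file; the helper name, interrupt name string and
 * flags below are assumptions for illustration only.
 */
#include <linux/interrupt.h>

static int example_register_eq_vector(unsigned int irq,
				      struct lpfc_hba_eq_hdl *eq_hdl)
{
	/* hard handler runs in IRQ context, the *_th handler in a kernel thread */
	return request_threaded_irq(irq, lpfc_sli4_hba_intr_handler,
				    lpfc_sli4_hba_intr_handler_th,
				    0, "lpfc-hba-eq", eq_hdl);
}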
  14594. /**
  14595. * lpfc_cq_create - Create a Completion Queue on the HBA
  14596. * @phba: HBA structure that indicates port to create a queue on.
  14597. * @cq: The queue structure to use to create the completion queue.
  14598. * @eq: The event queue to bind this completion queue to.
  14599. * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
  14600. * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
  14601. *
14602. * This function creates a completion queue, as detailed in @cq, on a port,
  14603. * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
  14604. *
  14605. * The @phba struct is used to send mailbox command to HBA. The @cq struct
  14606. * is used to get the entry count and entry size that are necessary to
  14607. * determine the number of pages to allocate and use for this queue. The @eq
  14608. * is used to indicate which event queue to bind this completion queue to. This
  14609. * function will send the CQ_CREATE mailbox command to the HBA to setup the
  14610. * completion queue. This function is asynchronous and will wait for the mailbox
  14611. * command to finish before continuing.
  14612. *
  14613. * On success this function will return a zero. If unable to allocate enough
  14614. * memory this function will return -ENOMEM. If the queue create mailbox command
  14615. * fails this function will return -ENXIO.
  14616. **/
  14617. int
  14618. lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
  14619. struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
  14620. {
  14621. struct lpfc_mbx_cq_create *cq_create;
  14622. struct lpfc_dmabuf *dmabuf;
  14623. LPFC_MBOXQ_t *mbox;
  14624. int rc, length, status = 0;
  14625. uint32_t shdr_status, shdr_add_status;
  14626. union lpfc_sli4_cfg_shdr *shdr;
  14627. /* sanity check on queue memory */
  14628. if (!cq || !eq)
  14629. return -ENODEV;
  14630. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  14631. if (!mbox)
  14632. return -ENOMEM;
  14633. length = (sizeof(struct lpfc_mbx_cq_create) -
  14634. sizeof(struct lpfc_sli4_cfg_mhdr));
  14635. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  14636. LPFC_MBOX_OPCODE_CQ_CREATE,
  14637. length, LPFC_SLI4_MBX_EMBED);
  14638. cq_create = &mbox->u.mqe.un.cq_create;
  14639. shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
  14640. bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
  14641. cq->page_count);
  14642. bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
  14643. bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
  14644. bf_set(lpfc_mbox_hdr_version, &shdr->request,
  14645. phba->sli4_hba.pc_sli4_params.cqv);
  14646. if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
  14647. bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
  14648. (cq->page_size / SLI4_PAGE_SIZE));
  14649. bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
  14650. eq->queue_id);
  14651. bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
  14652. phba->sli4_hba.pc_sli4_params.cqav);
  14653. } else {
  14654. bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
  14655. eq->queue_id);
  14656. }
  14657. switch (cq->entry_count) {
  14658. case 2048:
  14659. case 4096:
  14660. if (phba->sli4_hba.pc_sli4_params.cqv ==
  14661. LPFC_Q_CREATE_VERSION_2) {
  14662. cq_create->u.request.context.lpfc_cq_context_count =
  14663. cq->entry_count;
  14664. bf_set(lpfc_cq_context_count,
  14665. &cq_create->u.request.context,
  14666. LPFC_CQ_CNT_WORD7);
  14667. break;
  14668. }
  14669. fallthrough;
  14670. default:
  14671. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14672. "0361 Unsupported CQ count: "
  14673. "entry cnt %d sz %d pg cnt %d\n",
  14674. cq->entry_count, cq->entry_size,
  14675. cq->page_count);
  14676. if (cq->entry_count < 256) {
  14677. status = -EINVAL;
  14678. goto out;
  14679. }
  14680. fallthrough; /* otherwise default to smallest count */
  14681. case 256:
  14682. bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
  14683. LPFC_CQ_CNT_256);
  14684. break;
  14685. case 512:
  14686. bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
  14687. LPFC_CQ_CNT_512);
  14688. break;
  14689. case 1024:
  14690. bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
  14691. LPFC_CQ_CNT_1024);
  14692. break;
  14693. }
  14694. list_for_each_entry(dmabuf, &cq->page_list, list) {
  14695. memset(dmabuf->virt, 0, cq->page_size);
  14696. cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  14697. putPaddrLow(dmabuf->phys);
  14698. cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  14699. putPaddrHigh(dmabuf->phys);
  14700. }
  14701. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  14702. /* The IOCTL status is embedded in the mailbox subheader. */
  14703. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  14704. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  14705. if (shdr_status || shdr_add_status || rc) {
  14706. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14707. "2501 CQ_CREATE mailbox failed with "
  14708. "status x%x add_status x%x, mbx status x%x\n",
  14709. shdr_status, shdr_add_status, rc);
  14710. status = -ENXIO;
  14711. goto out;
  14712. }
  14713. cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
  14714. if (cq->queue_id == 0xFFFF) {
  14715. status = -ENXIO;
  14716. goto out;
  14717. }
  14718. /* link the cq onto the parent eq child list */
  14719. list_add_tail(&cq->list, &eq->child_list);
  14720. /* Set up completion queue's type and subtype */
  14721. cq->type = type;
  14722. cq->subtype = subtype;
  14723. cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
  14724. cq->assoc_qid = eq->queue_id;
  14725. cq->assoc_qp = eq;
  14726. cq->host_index = 0;
  14727. cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
  14728. cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
  14729. if (cq->queue_id > phba->sli4_hba.cq_max)
  14730. phba->sli4_hba.cq_max = cq->queue_id;
  14731. out:
  14732. mempool_free(mbox, phba->mbox_mem_pool);
  14733. return status;
  14734. }
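/*
 * Illustrative sketch (not part of the driver): a caller of lpfc_cq_create()
 * only needs the return codes documented above.  The helper name is
 * hypothetical, the queues are assumed to be allocated already, and the
 * LPFC_WCQ/LPFC_ELS constants are taken from the driver headers.
 */
static int example_setup_els_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
				struct lpfc_queue *eq)
{
	int rc;

	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_ELS);
	if (rc)
		return rc;	/* -ENODEV, -ENOMEM, -EINVAL or -ENXIO */

	/* cq->queue_id is now valid and cq sits on eq->child_list */
	return 0;
}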
  14735. /**
  14736. * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
  14737. * @phba: HBA structure that indicates port to create a queue on.
  14738. * @cqp: The queue structure array to use to create the completion queues.
  14739. * @hdwq: The hardware queue array with the EQ to bind completion queues to.
  14740. * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
  14741. * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
  14742. *
14743. * This function creates a set of completion queues to support MRQ,
14744. * as detailed in @cqp, on a port described by @phba, by sending a
14745. * CREATE_CQ_SET mailbox command to the HBA.
  14746. *
14747. * The @phba struct is used to send mailbox command to HBA. Each queue in @cqp
14748. * is used to get the entry count and entry size that are necessary to
14749. * determine the number of pages to allocate and use for that queue. The EQs
14750. * in @hdwq indicate which event queue to bind each completion queue to. This
14751. * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
14752. * completion queues. The mailbox command is issued in polled mode, so this
14753. * function waits for it to complete before returning.
  14754. *
  14755. * On success this function will return a zero. If unable to allocate enough
  14756. * memory this function will return -ENOMEM. If the queue create mailbox command
  14757. * fails this function will return -ENXIO.
  14758. **/
  14759. int
  14760. lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
  14761. struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
  14762. uint32_t subtype)
  14763. {
  14764. struct lpfc_queue *cq;
  14765. struct lpfc_queue *eq;
  14766. struct lpfc_mbx_cq_create_set *cq_set;
  14767. struct lpfc_dmabuf *dmabuf;
  14768. LPFC_MBOXQ_t *mbox;
  14769. int rc, length, alloclen, status = 0;
  14770. int cnt, idx, numcq, page_idx = 0;
  14771. uint32_t shdr_status, shdr_add_status;
  14772. union lpfc_sli4_cfg_shdr *shdr;
  14773. uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  14774. /* sanity check on queue memory */
  14775. numcq = phba->cfg_nvmet_mrq;
  14776. if (!cqp || !hdwq || !numcq)
  14777. return -ENODEV;
  14778. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  14779. if (!mbox)
  14780. return -ENOMEM;
  14781. length = sizeof(struct lpfc_mbx_cq_create_set);
  14782. length += ((numcq * cqp[0]->page_count) *
  14783. sizeof(struct dma_address));
  14784. alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  14785. LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
  14786. LPFC_SLI4_MBX_NEMBED);
  14787. if (alloclen < length) {
  14788. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14789. "3098 Allocated DMA memory size (%d) is "
  14790. "less than the requested DMA memory size "
  14791. "(%d)\n", alloclen, length);
  14792. status = -ENOMEM;
  14793. goto out;
  14794. }
  14795. cq_set = mbox->sge_array->addr[0];
  14796. shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
  14797. bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
  14798. for (idx = 0; idx < numcq; idx++) {
  14799. cq = cqp[idx];
  14800. eq = hdwq[idx].hba_eq;
  14801. if (!cq || !eq) {
  14802. status = -ENOMEM;
  14803. goto out;
  14804. }
  14805. if (!phba->sli4_hba.pc_sli4_params.supported)
  14806. hw_page_size = cq->page_size;
  14807. switch (idx) {
  14808. case 0:
  14809. bf_set(lpfc_mbx_cq_create_set_page_size,
  14810. &cq_set->u.request,
  14811. (hw_page_size / SLI4_PAGE_SIZE));
  14812. bf_set(lpfc_mbx_cq_create_set_num_pages,
  14813. &cq_set->u.request, cq->page_count);
  14814. bf_set(lpfc_mbx_cq_create_set_evt,
  14815. &cq_set->u.request, 1);
  14816. bf_set(lpfc_mbx_cq_create_set_valid,
  14817. &cq_set->u.request, 1);
  14818. bf_set(lpfc_mbx_cq_create_set_cqe_size,
  14819. &cq_set->u.request, 0);
  14820. bf_set(lpfc_mbx_cq_create_set_num_cq,
  14821. &cq_set->u.request, numcq);
  14822. bf_set(lpfc_mbx_cq_create_set_autovalid,
  14823. &cq_set->u.request,
  14824. phba->sli4_hba.pc_sli4_params.cqav);
  14825. switch (cq->entry_count) {
  14826. case 2048:
  14827. case 4096:
  14828. if (phba->sli4_hba.pc_sli4_params.cqv ==
  14829. LPFC_Q_CREATE_VERSION_2) {
  14830. bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  14831. &cq_set->u.request,
  14832. cq->entry_count);
  14833. bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  14834. &cq_set->u.request,
  14835. LPFC_CQ_CNT_WORD7);
  14836. break;
  14837. }
  14838. fallthrough;
  14839. default:
  14840. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14841. "3118 Bad CQ count. (%d)\n",
  14842. cq->entry_count);
  14843. if (cq->entry_count < 256) {
  14844. status = -EINVAL;
  14845. goto out;
  14846. }
  14847. fallthrough; /* otherwise default to smallest */
  14848. case 256:
  14849. bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  14850. &cq_set->u.request, LPFC_CQ_CNT_256);
  14851. break;
  14852. case 512:
  14853. bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  14854. &cq_set->u.request, LPFC_CQ_CNT_512);
  14855. break;
  14856. case 1024:
  14857. bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
  14858. &cq_set->u.request, LPFC_CQ_CNT_1024);
  14859. break;
  14860. }
  14861. bf_set(lpfc_mbx_cq_create_set_eq_id0,
  14862. &cq_set->u.request, eq->queue_id);
  14863. break;
  14864. case 1:
  14865. bf_set(lpfc_mbx_cq_create_set_eq_id1,
  14866. &cq_set->u.request, eq->queue_id);
  14867. break;
  14868. case 2:
  14869. bf_set(lpfc_mbx_cq_create_set_eq_id2,
  14870. &cq_set->u.request, eq->queue_id);
  14871. break;
  14872. case 3:
  14873. bf_set(lpfc_mbx_cq_create_set_eq_id3,
  14874. &cq_set->u.request, eq->queue_id);
  14875. break;
  14876. case 4:
  14877. bf_set(lpfc_mbx_cq_create_set_eq_id4,
  14878. &cq_set->u.request, eq->queue_id);
  14879. break;
  14880. case 5:
  14881. bf_set(lpfc_mbx_cq_create_set_eq_id5,
  14882. &cq_set->u.request, eq->queue_id);
  14883. break;
  14884. case 6:
  14885. bf_set(lpfc_mbx_cq_create_set_eq_id6,
  14886. &cq_set->u.request, eq->queue_id);
  14887. break;
  14888. case 7:
  14889. bf_set(lpfc_mbx_cq_create_set_eq_id7,
  14890. &cq_set->u.request, eq->queue_id);
  14891. break;
  14892. case 8:
  14893. bf_set(lpfc_mbx_cq_create_set_eq_id8,
  14894. &cq_set->u.request, eq->queue_id);
  14895. break;
  14896. case 9:
  14897. bf_set(lpfc_mbx_cq_create_set_eq_id9,
  14898. &cq_set->u.request, eq->queue_id);
  14899. break;
  14900. case 10:
  14901. bf_set(lpfc_mbx_cq_create_set_eq_id10,
  14902. &cq_set->u.request, eq->queue_id);
  14903. break;
  14904. case 11:
  14905. bf_set(lpfc_mbx_cq_create_set_eq_id11,
  14906. &cq_set->u.request, eq->queue_id);
  14907. break;
  14908. case 12:
  14909. bf_set(lpfc_mbx_cq_create_set_eq_id12,
  14910. &cq_set->u.request, eq->queue_id);
  14911. break;
  14912. case 13:
  14913. bf_set(lpfc_mbx_cq_create_set_eq_id13,
  14914. &cq_set->u.request, eq->queue_id);
  14915. break;
  14916. case 14:
  14917. bf_set(lpfc_mbx_cq_create_set_eq_id14,
  14918. &cq_set->u.request, eq->queue_id);
  14919. break;
  14920. case 15:
  14921. bf_set(lpfc_mbx_cq_create_set_eq_id15,
  14922. &cq_set->u.request, eq->queue_id);
  14923. break;
  14924. }
  14925. /* link the cq onto the parent eq child list */
  14926. list_add_tail(&cq->list, &eq->child_list);
  14927. /* Set up completion queue's type and subtype */
  14928. cq->type = type;
  14929. cq->subtype = subtype;
  14930. cq->assoc_qid = eq->queue_id;
  14931. cq->assoc_qp = eq;
  14932. cq->host_index = 0;
  14933. cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
  14934. cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
  14935. cq->entry_count);
  14936. cq->chann = idx;
  14937. rc = 0;
  14938. list_for_each_entry(dmabuf, &cq->page_list, list) {
  14939. memset(dmabuf->virt, 0, hw_page_size);
  14940. cnt = page_idx + dmabuf->buffer_tag;
  14941. cq_set->u.request.page[cnt].addr_lo =
  14942. putPaddrLow(dmabuf->phys);
  14943. cq_set->u.request.page[cnt].addr_hi =
  14944. putPaddrHigh(dmabuf->phys);
  14945. rc++;
  14946. }
  14947. page_idx += rc;
  14948. }
  14949. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  14950. /* The IOCTL status is embedded in the mailbox subheader. */
  14951. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  14952. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  14953. if (shdr_status || shdr_add_status || rc) {
  14954. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  14955. "3119 CQ_CREATE_SET mailbox failed with "
  14956. "status x%x add_status x%x, mbx status x%x\n",
  14957. shdr_status, shdr_add_status, rc);
  14958. status = -ENXIO;
  14959. goto out;
  14960. }
  14961. rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
  14962. if (rc == 0xFFFF) {
  14963. status = -ENXIO;
  14964. goto out;
  14965. }
  14966. for (idx = 0; idx < numcq; idx++) {
  14967. cq = cqp[idx];
  14968. cq->queue_id = rc + idx;
  14969. if (cq->queue_id > phba->sli4_hba.cq_max)
  14970. phba->sli4_hba.cq_max = cq->queue_id;
  14971. }
  14972. out:
  14973. lpfc_sli4_mbox_cmd_free(phba, mbox);
  14974. return status;
  14975. }
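/*
 * Illustrative sketch (not part of the driver): CREATE_CQ_SET carries the
 * pages of every CQ in one flat dma_address array, so the loop above offsets
 * each queue's buffer_tag by the running page_idx, and the firmware answers
 * with a single base id that the whole set is numbered from.  All values
 * below are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	int numcq = 2, pages_per_cq = 4, base_id = 40;
	int page_idx = 0, idx, tag;

	for (idx = 0; idx < numcq; idx++) {
		printf("cq%d -> queue_id %d\n", idx, base_id + idx);
		for (tag = 0; tag < pages_per_cq; tag++)
			printf("  page%d -> request.page[%d]\n",
			       tag, page_idx + tag);
		page_idx += pages_per_cq;
	}
	return 0;
}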
  14976. /**
  14977. * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
  14978. * @phba: HBA structure that indicates port to create a queue on.
  14979. * @mq: The queue structure to use to create the mailbox queue.
  14980. * @mbox: An allocated pointer to type LPFC_MBOXQ_t
14981. * @cq: The completion queue to associate with this mq.
  14982. *
14983. * This function provides fallback (fb) functionality for when
14984. * mq_create_ext fails on older FW generations. Its purpose is otherwise
14985. * identical to mq_create_ext.
  14986. *
  14987. * This routine cannot fail as all attributes were previously accessed and
  14988. * initialized in mq_create_ext.
  14989. **/
  14990. static void
  14991. lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
  14992. LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
  14993. {
  14994. struct lpfc_mbx_mq_create *mq_create;
  14995. struct lpfc_dmabuf *dmabuf;
  14996. int length;
  14997. length = (sizeof(struct lpfc_mbx_mq_create) -
  14998. sizeof(struct lpfc_sli4_cfg_mhdr));
  14999. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  15000. LPFC_MBOX_OPCODE_MQ_CREATE,
  15001. length, LPFC_SLI4_MBX_EMBED);
  15002. mq_create = &mbox->u.mqe.un.mq_create;
  15003. bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
  15004. mq->page_count);
  15005. bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
  15006. cq->queue_id);
  15007. bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
  15008. switch (mq->entry_count) {
  15009. case 16:
  15010. bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
  15011. LPFC_MQ_RING_SIZE_16);
  15012. break;
  15013. case 32:
  15014. bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
  15015. LPFC_MQ_RING_SIZE_32);
  15016. break;
  15017. case 64:
  15018. bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
  15019. LPFC_MQ_RING_SIZE_64);
  15020. break;
  15021. case 128:
  15022. bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
  15023. LPFC_MQ_RING_SIZE_128);
  15024. break;
  15025. }
  15026. list_for_each_entry(dmabuf, &mq->page_list, list) {
  15027. mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  15028. putPaddrLow(dmabuf->phys);
  15029. mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  15030. putPaddrHigh(dmabuf->phys);
  15031. }
  15032. }
  15033. /**
  15034. * lpfc_mq_create - Create a mailbox Queue on the HBA
  15035. * @phba: HBA structure that indicates port to create a queue on.
  15036. * @mq: The queue structure to use to create the mailbox queue.
15037. * @cq: The completion queue to associate with this mq.
  15038. * @subtype: The queue's subtype.
  15039. *
  15040. * This function creates a mailbox queue, as detailed in @mq, on a port,
  15041. * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
  15042. *
15043. * The @phba struct is used to send mailbox command to HBA. The @mq struct
15044. * is used to get the entry count and entry size that are necessary to
15045. * determine the number of pages to allocate and use for this queue. This
15046. * function will send the MQ_CREATE mailbox command to the HBA to setup the
15047. * mailbox queue. The mailbox command is issued in polled mode, so this
15048. * function waits for it to complete before returning.
  15049. *
  15050. * On success this function will return a zero. If unable to allocate enough
  15051. * memory this function will return -ENOMEM. If the queue create mailbox command
  15052. * fails this function will return -ENXIO.
  15053. **/
  15054. int32_t
  15055. lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
  15056. struct lpfc_queue *cq, uint32_t subtype)
  15057. {
  15058. struct lpfc_mbx_mq_create *mq_create;
  15059. struct lpfc_mbx_mq_create_ext *mq_create_ext;
  15060. struct lpfc_dmabuf *dmabuf;
  15061. LPFC_MBOXQ_t *mbox;
  15062. int rc, length, status = 0;
  15063. uint32_t shdr_status, shdr_add_status;
  15064. union lpfc_sli4_cfg_shdr *shdr;
  15065. uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  15066. /* sanity check on queue memory */
  15067. if (!mq || !cq)
  15068. return -ENODEV;
  15069. if (!phba->sli4_hba.pc_sli4_params.supported)
  15070. hw_page_size = SLI4_PAGE_SIZE;
  15071. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  15072. if (!mbox)
  15073. return -ENOMEM;
  15074. length = (sizeof(struct lpfc_mbx_mq_create_ext) -
  15075. sizeof(struct lpfc_sli4_cfg_mhdr));
  15076. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  15077. LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
  15078. length, LPFC_SLI4_MBX_EMBED);
  15079. mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
  15080. shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
  15081. bf_set(lpfc_mbx_mq_create_ext_num_pages,
  15082. &mq_create_ext->u.request, mq->page_count);
  15083. bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
  15084. &mq_create_ext->u.request, 1);
  15085. bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
  15086. &mq_create_ext->u.request, 1);
  15087. bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
  15088. &mq_create_ext->u.request, 1);
  15089. bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
  15090. &mq_create_ext->u.request, 1);
  15091. bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
  15092. &mq_create_ext->u.request, 1);
  15093. bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
  15094. bf_set(lpfc_mbox_hdr_version, &shdr->request,
  15095. phba->sli4_hba.pc_sli4_params.mqv);
  15096. if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
  15097. bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
  15098. cq->queue_id);
  15099. else
  15100. bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
  15101. cq->queue_id);
  15102. switch (mq->entry_count) {
  15103. default:
  15104. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15105. "0362 Unsupported MQ count. (%d)\n",
  15106. mq->entry_count);
  15107. if (mq->entry_count < 16) {
  15108. status = -EINVAL;
  15109. goto out;
  15110. }
  15111. fallthrough; /* otherwise default to smallest count */
  15112. case 16:
  15113. bf_set(lpfc_mq_context_ring_size,
  15114. &mq_create_ext->u.request.context,
  15115. LPFC_MQ_RING_SIZE_16);
  15116. break;
  15117. case 32:
  15118. bf_set(lpfc_mq_context_ring_size,
  15119. &mq_create_ext->u.request.context,
  15120. LPFC_MQ_RING_SIZE_32);
  15121. break;
  15122. case 64:
  15123. bf_set(lpfc_mq_context_ring_size,
  15124. &mq_create_ext->u.request.context,
  15125. LPFC_MQ_RING_SIZE_64);
  15126. break;
  15127. case 128:
  15128. bf_set(lpfc_mq_context_ring_size,
  15129. &mq_create_ext->u.request.context,
  15130. LPFC_MQ_RING_SIZE_128);
  15131. break;
  15132. }
  15133. list_for_each_entry(dmabuf, &mq->page_list, list) {
  15134. memset(dmabuf->virt, 0, hw_page_size);
  15135. mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
  15136. putPaddrLow(dmabuf->phys);
  15137. mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
  15138. putPaddrHigh(dmabuf->phys);
  15139. }
  15140. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  15141. mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
  15142. &mq_create_ext->u.response);
  15143. if (rc != MBX_SUCCESS) {
  15144. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  15145. "2795 MQ_CREATE_EXT failed with "
  15146. "status x%x. Failback to MQ_CREATE.\n",
  15147. rc);
  15148. lpfc_mq_create_fb_init(phba, mq, mbox, cq);
  15149. mq_create = &mbox->u.mqe.un.mq_create;
  15150. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  15151. shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
  15152. mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
  15153. &mq_create->u.response);
  15154. }
  15155. /* The IOCTL status is embedded in the mailbox subheader. */
  15156. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15157. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15158. if (shdr_status || shdr_add_status || rc) {
  15159. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15160. "2502 MQ_CREATE mailbox failed with "
  15161. "status x%x add_status x%x, mbx status x%x\n",
  15162. shdr_status, shdr_add_status, rc);
  15163. status = -ENXIO;
  15164. goto out;
  15165. }
  15166. if (mq->queue_id == 0xFFFF) {
  15167. status = -ENXIO;
  15168. goto out;
  15169. }
  15170. mq->type = LPFC_MQ;
  15171. mq->assoc_qid = cq->queue_id;
  15172. mq->subtype = subtype;
  15173. mq->host_index = 0;
  15174. mq->hba_index = 0;
  15175. /* link the mq onto the parent cq child list */
  15176. list_add_tail(&mq->list, &cq->child_list);
  15177. out:
  15178. mempool_free(mbox, phba->mbox_mem_pool);
  15179. return status;
  15180. }
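/*
 * Illustrative sketch (not part of the driver): the MQ ring only supports the
 * four sizes handled in the switch above.  Counts of 16 or more that are not
 * listed are logged and rounded down to 16, while counts below 16 fail with
 * -EINVAL.  A hypothetical caller-side pre-check could look like this.
 */
static int example_mq_count_is_native(unsigned int entry_count)
{
	switch (entry_count) {
	case 16:
	case 32:
	case 64:
	case 128:
		return 1;	/* maps directly to an LPFC_MQ_RING_SIZE_* value */
	default:
		return 0;	/* lpfc_mq_create() will warn or reject it */
	}
}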
  15181. /**
  15182. * lpfc_wq_create - Create a Work Queue on the HBA
  15183. * @phba: HBA structure that indicates port to create a queue on.
  15184. * @wq: The queue structure to use to create the work queue.
  15185. * @cq: The completion queue to bind this work queue to.
  15186. * @subtype: The subtype of the work queue indicating its functionality.
  15187. *
  15188. * This function creates a work queue, as detailed in @wq, on a port, described
  15189. * by @phba by sending a WQ_CREATE mailbox command to the HBA.
  15190. *
  15191. * The @phba struct is used to send mailbox command to HBA. The @wq struct
  15192. * is used to get the entry count and entry size that are necessary to
  15193. * determine the number of pages to allocate and use for this queue. The @cq
  15194. * is used to indicate which completion queue to bind this work queue to. This
  15195. * function will send the WQ_CREATE mailbox command to the HBA to setup the
15196. * work queue. The mailbox command is issued in polled mode, so this
15197. * function waits for it to complete before returning.
  15198. *
  15199. * On success this function will return a zero. If unable to allocate enough
  15200. * memory this function will return -ENOMEM. If the queue create mailbox command
  15201. * fails this function will return -ENXIO.
  15202. **/
  15203. int
  15204. lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
  15205. struct lpfc_queue *cq, uint32_t subtype)
  15206. {
  15207. struct lpfc_mbx_wq_create *wq_create;
  15208. struct lpfc_dmabuf *dmabuf;
  15209. LPFC_MBOXQ_t *mbox;
  15210. int rc, length, status = 0;
  15211. uint32_t shdr_status, shdr_add_status;
  15212. union lpfc_sli4_cfg_shdr *shdr;
  15213. uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  15214. struct dma_address *page;
  15215. void __iomem *bar_memmap_p;
  15216. uint32_t db_offset;
  15217. uint16_t pci_barset;
  15218. uint8_t dpp_barset;
  15219. uint32_t dpp_offset;
  15220. uint8_t wq_create_version;
  15221. #ifdef CONFIG_X86
  15222. unsigned long pg_addr;
  15223. #endif
  15224. /* sanity check on queue memory */
  15225. if (!wq || !cq)
  15226. return -ENODEV;
  15227. if (!phba->sli4_hba.pc_sli4_params.supported)
  15228. hw_page_size = wq->page_size;
  15229. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  15230. if (!mbox)
  15231. return -ENOMEM;
  15232. length = (sizeof(struct lpfc_mbx_wq_create) -
  15233. sizeof(struct lpfc_sli4_cfg_mhdr));
  15234. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  15235. LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
  15236. length, LPFC_SLI4_MBX_EMBED);
  15237. wq_create = &mbox->u.mqe.un.wq_create;
  15238. shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
  15239. bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
  15240. wq->page_count);
  15241. bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
  15242. cq->queue_id);
  15243. /* wqv is the earliest version supported, NOT the latest */
  15244. bf_set(lpfc_mbox_hdr_version, &shdr->request,
  15245. phba->sli4_hba.pc_sli4_params.wqv);
  15246. if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
  15247. (wq->page_size > SLI4_PAGE_SIZE))
  15248. wq_create_version = LPFC_Q_CREATE_VERSION_1;
  15249. else
  15250. wq_create_version = LPFC_Q_CREATE_VERSION_0;
  15251. switch (wq_create_version) {
  15252. case LPFC_Q_CREATE_VERSION_1:
  15253. bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
  15254. wq->entry_count);
  15255. bf_set(lpfc_mbox_hdr_version, &shdr->request,
  15256. LPFC_Q_CREATE_VERSION_1);
  15257. switch (wq->entry_size) {
  15258. default:
  15259. case 64:
  15260. bf_set(lpfc_mbx_wq_create_wqe_size,
  15261. &wq_create->u.request_1,
  15262. LPFC_WQ_WQE_SIZE_64);
  15263. break;
  15264. case 128:
  15265. bf_set(lpfc_mbx_wq_create_wqe_size,
  15266. &wq_create->u.request_1,
  15267. LPFC_WQ_WQE_SIZE_128);
  15268. break;
  15269. }
  15270. /* Request DPP by default */
  15271. bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
  15272. bf_set(lpfc_mbx_wq_create_page_size,
  15273. &wq_create->u.request_1,
  15274. (wq->page_size / SLI4_PAGE_SIZE));
  15275. page = wq_create->u.request_1.page;
  15276. break;
  15277. default:
  15278. page = wq_create->u.request.page;
  15279. break;
  15280. }
  15281. list_for_each_entry(dmabuf, &wq->page_list, list) {
  15282. memset(dmabuf->virt, 0, hw_page_size);
  15283. page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
  15284. page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
  15285. }
  15286. if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
  15287. bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
  15288. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  15289. /* The IOCTL status is embedded in the mailbox subheader. */
  15290. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15291. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15292. if (shdr_status || shdr_add_status || rc) {
  15293. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15294. "2503 WQ_CREATE mailbox failed with "
  15295. "status x%x add_status x%x, mbx status x%x\n",
  15296. shdr_status, shdr_add_status, rc);
  15297. status = -ENXIO;
  15298. goto out;
  15299. }
  15300. if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
  15301. wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
  15302. &wq_create->u.response);
  15303. else
  15304. wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
  15305. &wq_create->u.response_1);
  15306. if (wq->queue_id == 0xFFFF) {
  15307. status = -ENXIO;
  15308. goto out;
  15309. }
  15310. wq->db_format = LPFC_DB_LIST_FORMAT;
  15311. if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
  15312. if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
  15313. wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
  15314. &wq_create->u.response);
  15315. if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
  15316. (wq->db_format != LPFC_DB_RING_FORMAT)) {
  15317. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15318. "3265 WQ[%d] doorbell format "
  15319. "not supported: x%x\n",
  15320. wq->queue_id, wq->db_format);
  15321. status = -EINVAL;
  15322. goto out;
  15323. }
  15324. pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
  15325. &wq_create->u.response);
  15326. bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
  15327. pci_barset);
  15328. if (!bar_memmap_p) {
  15329. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15330. "3263 WQ[%d] failed to memmap "
  15331. "pci barset:x%x\n",
  15332. wq->queue_id, pci_barset);
  15333. status = -ENOMEM;
  15334. goto out;
  15335. }
  15336. db_offset = wq_create->u.response.doorbell_offset;
  15337. if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
  15338. (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
  15339. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15340. "3252 WQ[%d] doorbell offset "
  15341. "not supported: x%x\n",
  15342. wq->queue_id, db_offset);
  15343. status = -EINVAL;
  15344. goto out;
  15345. }
  15346. wq->db_regaddr = bar_memmap_p + db_offset;
  15347. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  15348. "3264 WQ[%d]: barset:x%x, offset:x%x, "
  15349. "format:x%x\n", wq->queue_id,
  15350. pci_barset, db_offset, wq->db_format);
  15351. } else
  15352. wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
  15353. } else {
  15354. /* Check if DPP was honored by the firmware */
  15355. wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
  15356. &wq_create->u.response_1);
  15357. if (wq->dpp_enable) {
  15358. pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
  15359. &wq_create->u.response_1);
  15360. bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
  15361. pci_barset);
  15362. if (!bar_memmap_p) {
  15363. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15364. "3267 WQ[%d] failed to memmap "
  15365. "pci barset:x%x\n",
  15366. wq->queue_id, pci_barset);
  15367. status = -ENOMEM;
  15368. goto out;
  15369. }
  15370. db_offset = wq_create->u.response_1.doorbell_offset;
  15371. wq->db_regaddr = bar_memmap_p + db_offset;
  15372. wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
  15373. &wq_create->u.response_1);
  15374. dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
  15375. &wq_create->u.response_1);
  15376. bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
  15377. dpp_barset);
  15378. if (!bar_memmap_p) {
  15379. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15380. "3268 WQ[%d] failed to memmap "
  15381. "pci barset:x%x\n",
  15382. wq->queue_id, dpp_barset);
  15383. status = -ENOMEM;
  15384. goto out;
  15385. }
  15386. dpp_offset = wq_create->u.response_1.dpp_offset;
  15387. wq->dpp_regaddr = bar_memmap_p + dpp_offset;
  15388. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  15389. "3271 WQ[%d]: barset:x%x, offset:x%x, "
  15390. "dpp_id:x%x dpp_barset:x%x "
  15391. "dpp_offset:x%x\n",
  15392. wq->queue_id, pci_barset, db_offset,
  15393. wq->dpp_id, dpp_barset, dpp_offset);
  15394. #ifdef CONFIG_X86
  15395. /* Enable combined writes for DPP aperture */
  15396. pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
  15397. rc = set_memory_wc(pg_addr, 1);
  15398. if (rc) {
  15399. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  15400. "3272 Cannot setup Combined "
  15401. "Write on WQ[%d] - disable DPP\n",
  15402. wq->queue_id);
  15403. phba->cfg_enable_dpp = 0;
  15404. }
  15405. #else
  15406. phba->cfg_enable_dpp = 0;
  15407. #endif
  15408. } else
  15409. wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
  15410. }
  15411. wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
  15412. if (wq->pring == NULL) {
  15413. status = -ENOMEM;
  15414. goto out;
  15415. }
  15416. wq->type = LPFC_WQ;
  15417. wq->assoc_qid = cq->queue_id;
  15418. wq->subtype = subtype;
  15419. wq->host_index = 0;
  15420. wq->hba_index = 0;
  15421. wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
  15422. /* link the wq onto the parent cq child list */
  15423. list_add_tail(&wq->list, &cq->child_list);
  15424. out:
  15425. mempool_free(mbox, phba->mbox_mem_pool);
  15426. return status;
  15427. }
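/*
 * Illustrative sketch (not part of the driver): set_memory_wc() above changes
 * the caching attribute of whole pages, so the DPP doorbell address is first
 * rounded down to its page boundary with PAGE_MASK.  The address and the
 * 4 KiB page size below are made-up values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dpp_regaddr = 0xffffc90000123a40ULL;	/* hypothetical mapping */
	uint64_t page_mask = ~(uint64_t)0xfff;		/* 4 KiB pages */

	printf("page made write-combining: 0x%llx\n",
	       (unsigned long long)(dpp_regaddr & page_mask));
	return 0;
}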
  15428. /**
  15429. * lpfc_rq_create - Create a Receive Queue on the HBA
  15430. * @phba: HBA structure that indicates port to create a queue on.
  15431. * @hrq: The queue structure to use to create the header receive queue.
  15432. * @drq: The queue structure to use to create the data receive queue.
15433. * @cq: The completion queue to bind these receive queues to.
15434. * @subtype: The subtype of the receive queues indicating their functionality.
  15435. *
15436. * This function creates a receive buffer queue pair, as detailed in @hrq and
15437. * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
  15438. * to the HBA.
  15439. *
  15440. * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
  15441. * struct is used to get the entry count that is necessary to determine the
  15442. * number of pages to use for this queue. The @cq is used to indicate which
  15443. * completion queue to bind received buffers that are posted to these queues to.
  15444. * This function will send the RQ_CREATE mailbox command to the HBA to setup the
15445. * receive queue pair. The mailbox commands are issued in polled mode, so this
15446. * function waits for each to complete before returning.
  15447. *
  15448. * On success this function will return a zero. If unable to allocate enough
  15449. * memory this function will return -ENOMEM. If the queue create mailbox command
  15450. * fails this function will return -ENXIO.
  15451. **/
  15452. int
  15453. lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
  15454. struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
  15455. {
  15456. struct lpfc_mbx_rq_create *rq_create;
  15457. struct lpfc_dmabuf *dmabuf;
  15458. LPFC_MBOXQ_t *mbox;
  15459. int rc, length, status = 0;
  15460. uint32_t shdr_status, shdr_add_status;
  15461. union lpfc_sli4_cfg_shdr *shdr;
  15462. uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  15463. void __iomem *bar_memmap_p;
  15464. uint32_t db_offset;
  15465. uint16_t pci_barset;
  15466. /* sanity check on queue memory */
  15467. if (!hrq || !drq || !cq)
  15468. return -ENODEV;
  15469. if (!phba->sli4_hba.pc_sli4_params.supported)
  15470. hw_page_size = SLI4_PAGE_SIZE;
  15471. if (hrq->entry_count != drq->entry_count)
  15472. return -EINVAL;
  15473. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  15474. if (!mbox)
  15475. return -ENOMEM;
  15476. length = (sizeof(struct lpfc_mbx_rq_create) -
  15477. sizeof(struct lpfc_sli4_cfg_mhdr));
  15478. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  15479. LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
  15480. length, LPFC_SLI4_MBX_EMBED);
  15481. rq_create = &mbox->u.mqe.un.rq_create;
  15482. shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
  15483. bf_set(lpfc_mbox_hdr_version, &shdr->request,
  15484. phba->sli4_hba.pc_sli4_params.rqv);
  15485. if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
  15486. bf_set(lpfc_rq_context_rqe_count_1,
  15487. &rq_create->u.request.context,
  15488. hrq->entry_count);
  15489. rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
  15490. bf_set(lpfc_rq_context_rqe_size,
  15491. &rq_create->u.request.context,
  15492. LPFC_RQE_SIZE_8);
  15493. bf_set(lpfc_rq_context_page_size,
  15494. &rq_create->u.request.context,
  15495. LPFC_RQ_PAGE_SIZE_4096);
  15496. } else {
  15497. switch (hrq->entry_count) {
  15498. default:
  15499. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15500. "2535 Unsupported RQ count. (%d)\n",
  15501. hrq->entry_count);
  15502. if (hrq->entry_count < 512) {
  15503. status = -EINVAL;
  15504. goto out;
  15505. }
  15506. fallthrough; /* otherwise default to smallest count */
  15507. case 512:
  15508. bf_set(lpfc_rq_context_rqe_count,
  15509. &rq_create->u.request.context,
  15510. LPFC_RQ_RING_SIZE_512);
  15511. break;
  15512. case 1024:
  15513. bf_set(lpfc_rq_context_rqe_count,
  15514. &rq_create->u.request.context,
  15515. LPFC_RQ_RING_SIZE_1024);
  15516. break;
  15517. case 2048:
  15518. bf_set(lpfc_rq_context_rqe_count,
  15519. &rq_create->u.request.context,
  15520. LPFC_RQ_RING_SIZE_2048);
  15521. break;
  15522. case 4096:
  15523. bf_set(lpfc_rq_context_rqe_count,
  15524. &rq_create->u.request.context,
  15525. LPFC_RQ_RING_SIZE_4096);
  15526. break;
  15527. }
  15528. bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
  15529. LPFC_HDR_BUF_SIZE);
  15530. }
  15531. bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
  15532. cq->queue_id);
  15533. bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
  15534. hrq->page_count);
  15535. list_for_each_entry(dmabuf, &hrq->page_list, list) {
  15536. memset(dmabuf->virt, 0, hw_page_size);
  15537. rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  15538. putPaddrLow(dmabuf->phys);
  15539. rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  15540. putPaddrHigh(dmabuf->phys);
  15541. }
  15542. if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
  15543. bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
  15544. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  15545. /* The IOCTL status is embedded in the mailbox subheader. */
  15546. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15547. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15548. if (shdr_status || shdr_add_status || rc) {
  15549. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15550. "2504 RQ_CREATE mailbox failed with "
  15551. "status x%x add_status x%x, mbx status x%x\n",
  15552. shdr_status, shdr_add_status, rc);
  15553. status = -ENXIO;
  15554. goto out;
  15555. }
  15556. hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
  15557. if (hrq->queue_id == 0xFFFF) {
  15558. status = -ENXIO;
  15559. goto out;
  15560. }
  15561. if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
  15562. hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
  15563. &rq_create->u.response);
  15564. if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
  15565. (hrq->db_format != LPFC_DB_RING_FORMAT)) {
  15566. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15567. "3262 RQ [%d] doorbell format not "
  15568. "supported: x%x\n", hrq->queue_id,
  15569. hrq->db_format);
  15570. status = -EINVAL;
  15571. goto out;
  15572. }
  15573. pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
  15574. &rq_create->u.response);
  15575. bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
  15576. if (!bar_memmap_p) {
  15577. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15578. "3269 RQ[%d] failed to memmap pci "
  15579. "barset:x%x\n", hrq->queue_id,
  15580. pci_barset);
  15581. status = -ENOMEM;
  15582. goto out;
  15583. }
  15584. db_offset = rq_create->u.response.doorbell_offset;
  15585. if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
  15586. (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
  15587. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15588. "3270 RQ[%d] doorbell offset not "
  15589. "supported: x%x\n", hrq->queue_id,
  15590. db_offset);
  15591. status = -EINVAL;
  15592. goto out;
  15593. }
  15594. hrq->db_regaddr = bar_memmap_p + db_offset;
  15595. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  15596. "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
  15597. "format:x%x\n", hrq->queue_id, pci_barset,
  15598. db_offset, hrq->db_format);
  15599. } else {
  15600. hrq->db_format = LPFC_DB_RING_FORMAT;
  15601. hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
  15602. }
  15603. hrq->type = LPFC_HRQ;
  15604. hrq->assoc_qid = cq->queue_id;
  15605. hrq->subtype = subtype;
  15606. hrq->host_index = 0;
  15607. hrq->hba_index = 0;
  15608. hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
  15609. /* now create the data queue */
  15610. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  15611. LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
  15612. length, LPFC_SLI4_MBX_EMBED);
  15613. bf_set(lpfc_mbox_hdr_version, &shdr->request,
  15614. phba->sli4_hba.pc_sli4_params.rqv);
  15615. if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
  15616. bf_set(lpfc_rq_context_rqe_count_1,
  15617. &rq_create->u.request.context, hrq->entry_count);
  15618. if (subtype == LPFC_NVMET)
  15619. rq_create->u.request.context.buffer_size =
  15620. LPFC_NVMET_DATA_BUF_SIZE;
  15621. else
  15622. rq_create->u.request.context.buffer_size =
  15623. LPFC_DATA_BUF_SIZE;
  15624. bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
  15625. LPFC_RQE_SIZE_8);
  15626. bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
  15627. (PAGE_SIZE/SLI4_PAGE_SIZE));
  15628. } else {
  15629. switch (drq->entry_count) {
  15630. default:
  15631. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15632. "2536 Unsupported RQ count. (%d)\n",
  15633. drq->entry_count);
  15634. if (drq->entry_count < 512) {
  15635. status = -EINVAL;
  15636. goto out;
  15637. }
  15638. fallthrough; /* otherwise default to smallest count */
  15639. case 512:
  15640. bf_set(lpfc_rq_context_rqe_count,
  15641. &rq_create->u.request.context,
  15642. LPFC_RQ_RING_SIZE_512);
  15643. break;
  15644. case 1024:
  15645. bf_set(lpfc_rq_context_rqe_count,
  15646. &rq_create->u.request.context,
  15647. LPFC_RQ_RING_SIZE_1024);
  15648. break;
  15649. case 2048:
  15650. bf_set(lpfc_rq_context_rqe_count,
  15651. &rq_create->u.request.context,
  15652. LPFC_RQ_RING_SIZE_2048);
  15653. break;
  15654. case 4096:
  15655. bf_set(lpfc_rq_context_rqe_count,
  15656. &rq_create->u.request.context,
  15657. LPFC_RQ_RING_SIZE_4096);
  15658. break;
  15659. }
  15660. if (subtype == LPFC_NVMET)
  15661. bf_set(lpfc_rq_context_buf_size,
  15662. &rq_create->u.request.context,
  15663. LPFC_NVMET_DATA_BUF_SIZE);
  15664. else
  15665. bf_set(lpfc_rq_context_buf_size,
  15666. &rq_create->u.request.context,
  15667. LPFC_DATA_BUF_SIZE);
  15668. }
  15669. bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
  15670. cq->queue_id);
  15671. bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
  15672. drq->page_count);
  15673. list_for_each_entry(dmabuf, &drq->page_list, list) {
  15674. rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
  15675. putPaddrLow(dmabuf->phys);
  15676. rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
  15677. putPaddrHigh(dmabuf->phys);
  15678. }
  15679. if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
  15680. bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
  15681. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  15682. /* The IOCTL status is embedded in the mailbox subheader. */
  15683. shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
  15684. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15685. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15686. if (shdr_status || shdr_add_status || rc) {
  15687. status = -ENXIO;
  15688. goto out;
  15689. }
  15690. drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
  15691. if (drq->queue_id == 0xFFFF) {
  15692. status = -ENXIO;
  15693. goto out;
  15694. }
  15695. drq->type = LPFC_DRQ;
  15696. drq->assoc_qid = cq->queue_id;
  15697. drq->subtype = subtype;
  15698. drq->host_index = 0;
  15699. drq->hba_index = 0;
  15700. drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
  15701. /* link the header and data RQs onto the parent cq child list */
  15702. list_add_tail(&hrq->list, &cq->child_list);
  15703. list_add_tail(&drq->list, &cq->child_list);
  15704. out:
  15705. mempool_free(mbox, phba->mbox_mem_pool);
  15706. return status;
  15707. }
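/*
 * Illustrative sketch (not part of the driver): the header and data RQs must
 * have identical entry counts or lpfc_rq_create() returns -EINVAL before any
 * mailbox is issued.  The helper name is hypothetical, the queues are assumed
 * to be allocated already, and LPFC_USOL comes from the driver headers.
 */
static int example_setup_unsol_rq_pair(struct lpfc_hba *phba,
				       struct lpfc_queue *hrq,
				       struct lpfc_queue *drq,
				       struct lpfc_queue *cq)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;		/* same check the routine performs */

	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}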
  15708. /**
  15709. * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
  15710. * @phba: HBA structure that indicates port to create a queue on.
  15711. * @hrqp: The queue structure array to use to create the header receive queues.
  15712. * @drqp: The queue structure array to use to create the data receive queues.
  15713. * @cqp: The completion queue array to bind these receive queues to.
  15714. * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
  15715. *
15716. * This function creates receive buffer queue pairs, as detailed in @hrqp and
15717. * @drqp, on a port described by @phba, by sending a RQ_CREATE mailbox command
  15718. * to the HBA.
  15719. *
15720. * The @phba struct is used to send mailbox command to HBA. The @hrqp and
15721. * @drqp arrays are used to get the entry counts that are necessary to determine
15722. * the number of pages to use for each queue. The @cqp array indicates which
15723. * completion queues to bind the received buffers posted to these queues to.
15724. * This function will send the RQ_CREATE mailbox command to the HBA to setup the
15725. * receive queue pairs. The mailbox command is issued in polled mode, so this
15726. * function waits for it to complete before returning.
  15727. *
  15728. * On success this function will return a zero. If unable to allocate enough
  15729. * memory this function will return -ENOMEM. If the queue create mailbox command
  15730. * fails this function will return -ENXIO.
  15731. **/
  15732. int
  15733. lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
  15734. struct lpfc_queue **drqp, struct lpfc_queue **cqp,
  15735. uint32_t subtype)
  15736. {
  15737. struct lpfc_queue *hrq, *drq, *cq;
  15738. struct lpfc_mbx_rq_create_v2 *rq_create;
  15739. struct lpfc_dmabuf *dmabuf;
  15740. LPFC_MBOXQ_t *mbox;
  15741. int rc, length, alloclen, status = 0;
  15742. int cnt, idx, numrq, page_idx = 0;
  15743. uint32_t shdr_status, shdr_add_status;
  15744. union lpfc_sli4_cfg_shdr *shdr;
  15745. uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
  15746. numrq = phba->cfg_nvmet_mrq;
  15747. /* sanity check on array memory */
  15748. if (!hrqp || !drqp || !cqp || !numrq)
  15749. return -ENODEV;
  15750. if (!phba->sli4_hba.pc_sli4_params.supported)
  15751. hw_page_size = SLI4_PAGE_SIZE;
  15752. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  15753. if (!mbox)
  15754. return -ENOMEM;
  15755. length = sizeof(struct lpfc_mbx_rq_create_v2);
  15756. length += ((2 * numrq * hrqp[0]->page_count) *
  15757. sizeof(struct dma_address));
  15758. alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  15759. LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
  15760. LPFC_SLI4_MBX_NEMBED);
  15761. if (alloclen < length) {
  15762. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15763. "3099 Allocated DMA memory size (%d) is "
  15764. "less than the requested DMA memory size "
  15765. "(%d)\n", alloclen, length);
  15766. status = -ENOMEM;
  15767. goto out;
  15768. }
  15769. rq_create = mbox->sge_array->addr[0];
  15770. shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
  15771. bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
  15772. cnt = 0;
  15773. for (idx = 0; idx < numrq; idx++) {
  15774. hrq = hrqp[idx];
  15775. drq = drqp[idx];
  15776. cq = cqp[idx];
  15777. /* sanity check on queue memory */
  15778. if (!hrq || !drq || !cq) {
  15779. status = -ENODEV;
  15780. goto out;
  15781. }
  15782. if (hrq->entry_count != drq->entry_count) {
  15783. status = -EINVAL;
  15784. goto out;
  15785. }
  15786. if (idx == 0) {
  15787. bf_set(lpfc_mbx_rq_create_num_pages,
  15788. &rq_create->u.request,
  15789. hrq->page_count);
  15790. bf_set(lpfc_mbx_rq_create_rq_cnt,
  15791. &rq_create->u.request, (numrq * 2));
  15792. bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
  15793. 1);
  15794. bf_set(lpfc_rq_context_base_cq,
  15795. &rq_create->u.request.context,
  15796. cq->queue_id);
  15797. bf_set(lpfc_rq_context_data_size,
  15798. &rq_create->u.request.context,
  15799. LPFC_NVMET_DATA_BUF_SIZE);
  15800. bf_set(lpfc_rq_context_hdr_size,
  15801. &rq_create->u.request.context,
  15802. LPFC_HDR_BUF_SIZE);
  15803. bf_set(lpfc_rq_context_rqe_count_1,
  15804. &rq_create->u.request.context,
  15805. hrq->entry_count);
  15806. bf_set(lpfc_rq_context_rqe_size,
  15807. &rq_create->u.request.context,
  15808. LPFC_RQE_SIZE_8);
  15809. bf_set(lpfc_rq_context_page_size,
  15810. &rq_create->u.request.context,
  15811. (PAGE_SIZE/SLI4_PAGE_SIZE));
  15812. }
  15813. rc = 0;
  15814. list_for_each_entry(dmabuf, &hrq->page_list, list) {
  15815. memset(dmabuf->virt, 0, hw_page_size);
  15816. cnt = page_idx + dmabuf->buffer_tag;
  15817. rq_create->u.request.page[cnt].addr_lo =
  15818. putPaddrLow(dmabuf->phys);
  15819. rq_create->u.request.page[cnt].addr_hi =
  15820. putPaddrHigh(dmabuf->phys);
  15821. rc++;
  15822. }
  15823. page_idx += rc;
  15824. rc = 0;
  15825. list_for_each_entry(dmabuf, &drq->page_list, list) {
  15826. memset(dmabuf->virt, 0, hw_page_size);
  15827. cnt = page_idx + dmabuf->buffer_tag;
  15828. rq_create->u.request.page[cnt].addr_lo =
  15829. putPaddrLow(dmabuf->phys);
  15830. rq_create->u.request.page[cnt].addr_hi =
  15831. putPaddrHigh(dmabuf->phys);
  15832. rc++;
  15833. }
  15834. page_idx += rc;
  15835. hrq->db_format = LPFC_DB_RING_FORMAT;
  15836. hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
  15837. hrq->type = LPFC_HRQ;
  15838. hrq->assoc_qid = cq->queue_id;
  15839. hrq->subtype = subtype;
  15840. hrq->host_index = 0;
  15841. hrq->hba_index = 0;
  15842. hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
  15843. drq->db_format = LPFC_DB_RING_FORMAT;
  15844. drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
  15845. drq->type = LPFC_DRQ;
  15846. drq->assoc_qid = cq->queue_id;
  15847. drq->subtype = subtype;
  15848. drq->host_index = 0;
  15849. drq->hba_index = 0;
  15850. drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
  15851. list_add_tail(&hrq->list, &cq->child_list);
  15852. list_add_tail(&drq->list, &cq->child_list);
  15853. }
  15854. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  15855. /* The IOCTL status is embedded in the mailbox subheader. */
  15856. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15857. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15858. if (shdr_status || shdr_add_status || rc) {
  15859. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15860. "3120 RQ_CREATE mailbox failed with "
  15861. "status x%x add_status x%x, mbx status x%x\n",
  15862. shdr_status, shdr_add_status, rc);
  15863. status = -ENXIO;
  15864. goto out;
  15865. }
  15866. rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
  15867. if (rc == 0xFFFF) {
  15868. status = -ENXIO;
  15869. goto out;
  15870. }
  15871. /* Initialize all RQs with associated queue id */
  15872. for (idx = 0; idx < numrq; idx++) {
  15873. hrq = hrqp[idx];
  15874. hrq->queue_id = rc + (2 * idx);
  15875. drq = drqp[idx];
  15876. drq->queue_id = rc + (2 * idx) + 1;
  15877. }
  15878. out:
  15879. lpfc_sli4_mbox_cmd_free(phba, mbox);
  15880. return status;
  15881. }
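/*
 * Illustrative sketch (not part of the driver): the single RQ_CREATE (v2)
 * above returns one base queue id for the whole MRQ set; header and data
 * queues then alternate from that base, as the final loop shows.  The base id
 * and queue count below are made-up values.
 */
#include <stdio.h>

int main(void)
{
	int base_id = 100, numrq = 4, idx;	/* hypothetical firmware reply */

	for (idx = 0; idx < numrq; idx++)
		printf("mrq%d: hrq id %d, drq id %d\n",
		       idx, base_id + (2 * idx), base_id + (2 * idx) + 1);
	return 0;
}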
  15882. /**
  15883. * lpfc_eq_destroy - Destroy an event Queue on the HBA
  15884. * @phba: HBA structure that indicates port to destroy a queue on.
  15885. * @eq: The queue structure associated with the queue to destroy.
  15886. *
15887. * This function destroys a queue, as detailed in @eq, by sending a mailbox
  15888. * command, specific to the type of queue, to the HBA.
  15889. *
  15890. * The @eq struct is used to get the queue ID of the queue to destroy.
  15891. *
  15892. * On success this function will return a zero. If the queue destroy mailbox
  15893. * command fails this function will return -ENXIO.
  15894. **/
  15895. int
  15896. lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
  15897. {
  15898. LPFC_MBOXQ_t *mbox;
  15899. int rc, length, status = 0;
  15900. uint32_t shdr_status, shdr_add_status;
  15901. union lpfc_sli4_cfg_shdr *shdr;
  15902. /* sanity check on queue memory */
  15903. if (!eq)
  15904. return -ENODEV;
  15905. if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
  15906. goto list_remove;
  15907. mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
  15908. if (!mbox)
  15909. return -ENOMEM;
  15910. length = (sizeof(struct lpfc_mbx_eq_destroy) -
  15911. sizeof(struct lpfc_sli4_cfg_mhdr));
  15912. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  15913. LPFC_MBOX_OPCODE_EQ_DESTROY,
  15914. length, LPFC_SLI4_MBX_EMBED);
  15915. bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
  15916. eq->queue_id);
  15917. mbox->vport = eq->phba->pport;
  15918. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  15919. rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
  15920. /* The IOCTL status is embedded in the mailbox subheader. */
  15921. shdr = (union lpfc_sli4_cfg_shdr *)
  15922. &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
  15923. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15924. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15925. if (shdr_status || shdr_add_status || rc) {
  15926. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15927. "2505 EQ_DESTROY mailbox failed with "
  15928. "status x%x add_status x%x, mbx status x%x\n",
  15929. shdr_status, shdr_add_status, rc);
  15930. status = -ENXIO;
  15931. }
  15932. mempool_free(mbox, eq->phba->mbox_mem_pool);
  15933. list_remove:
  15934. /* Remove eq from any list */
  15935. list_del_init(&eq->list);
  15936. return status;
  15937. }
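/*
 * Illustrative sketch, not part of the driver: how a caller might consume the
 * return codes documented for lpfc_eq_destroy() above.  The helper name and
 * "my_eq" are hypothetical; the block is guarded out so it is never built.
 */
#if 0
static int example_teardown_eq(struct lpfc_hba *phba, struct lpfc_queue *my_eq)
{
	int rc;

	rc = lpfc_eq_destroy(phba, my_eq);
	/*
	 * -ENOMEM: no mailbox memory; the EQ was not unlinked.
	 * -ENXIO : the EQ_DESTROY mailbox failed, but the EQ was unlinked.
	 * 0      : the EQ was destroyed (or SLI was inactive) and unlinked.
	 */
	return rc;
}
#endif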
  15938. /**
  15939. * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
  15940. * @phba: HBA structure that indicates port to destroy a queue on.
  15941. * @cq: The queue structure associated with the queue to destroy.
  15942. *
15943. * This function destroys a queue, as detailed in @cq, by sending a mailbox
  15944. * command, specific to the type of queue, to the HBA.
  15945. *
  15946. * The @cq struct is used to get the queue ID of the queue to destroy.
  15947. *
  15948. * On success this function will return a zero. If the queue destroy mailbox
  15949. * command fails this function will return -ENXIO.
  15950. **/
  15951. int
  15952. lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
  15953. {
  15954. LPFC_MBOXQ_t *mbox;
  15955. int rc, length, status = 0;
  15956. uint32_t shdr_status, shdr_add_status;
  15957. union lpfc_sli4_cfg_shdr *shdr;
  15958. /* sanity check on queue memory */
  15959. if (!cq)
  15960. return -ENODEV;
  15961. if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
  15962. goto list_remove;
  15963. mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
  15964. if (!mbox)
  15965. return -ENOMEM;
  15966. length = (sizeof(struct lpfc_mbx_cq_destroy) -
  15967. sizeof(struct lpfc_sli4_cfg_mhdr));
  15968. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  15969. LPFC_MBOX_OPCODE_CQ_DESTROY,
  15970. length, LPFC_SLI4_MBX_EMBED);
  15971. bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
  15972. cq->queue_id);
  15973. mbox->vport = cq->phba->pport;
  15974. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  15975. rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
  15976. /* The IOCTL status is embedded in the mailbox subheader. */
  15977. shdr = (union lpfc_sli4_cfg_shdr *)
  15978. &mbox->u.mqe.un.wq_create.header.cfg_shdr;
  15979. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  15980. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  15981. if (shdr_status || shdr_add_status || rc) {
  15982. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  15983. "2506 CQ_DESTROY mailbox failed with "
  15984. "status x%x add_status x%x, mbx status x%x\n",
  15985. shdr_status, shdr_add_status, rc);
  15986. status = -ENXIO;
  15987. }
  15988. mempool_free(mbox, cq->phba->mbox_mem_pool);
  15989. list_remove:
  15990. /* Remove cq from any list */
  15991. list_del_init(&cq->list);
  15992. return status;
  15993. }
  15994. /**
  15995. * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
  15996. * @phba: HBA structure that indicates port to destroy a queue on.
  15997. * @mq: The queue structure associated with the queue to destroy.
  15998. *
15999. * This function destroys a queue, as detailed in @mq, by sending a mailbox
  16000. * command, specific to the type of queue, to the HBA.
  16001. *
  16002. * The @mq struct is used to get the queue ID of the queue to destroy.
  16003. *
  16004. * On success this function will return a zero. If the queue destroy mailbox
  16005. * command fails this function will return -ENXIO.
  16006. **/
  16007. int
  16008. lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
  16009. {
  16010. LPFC_MBOXQ_t *mbox;
  16011. int rc, length, status = 0;
  16012. uint32_t shdr_status, shdr_add_status;
  16013. union lpfc_sli4_cfg_shdr *shdr;
  16014. /* sanity check on queue memory */
  16015. if (!mq)
  16016. return -ENODEV;
  16017. if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
  16018. goto list_remove;
  16019. mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
  16020. if (!mbox)
  16021. return -ENOMEM;
  16022. length = (sizeof(struct lpfc_mbx_mq_destroy) -
  16023. sizeof(struct lpfc_sli4_cfg_mhdr));
  16024. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  16025. LPFC_MBOX_OPCODE_MQ_DESTROY,
  16026. length, LPFC_SLI4_MBX_EMBED);
  16027. bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
  16028. mq->queue_id);
  16029. mbox->vport = mq->phba->pport;
  16030. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  16031. rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
  16032. /* The IOCTL status is embedded in the mailbox subheader. */
  16033. shdr = (union lpfc_sli4_cfg_shdr *)
  16034. &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
  16035. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16036. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16037. if (shdr_status || shdr_add_status || rc) {
  16038. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16039. "2507 MQ_DESTROY mailbox failed with "
  16040. "status x%x add_status x%x, mbx status x%x\n",
  16041. shdr_status, shdr_add_status, rc);
  16042. status = -ENXIO;
  16043. }
  16044. mempool_free(mbox, mq->phba->mbox_mem_pool);
  16045. list_remove:
  16046. /* Remove mq from any list */
  16047. list_del_init(&mq->list);
  16048. return status;
  16049. }
  16050. /**
  16051. * lpfc_wq_destroy - Destroy a Work Queue on the HBA
  16052. * @phba: HBA structure that indicates port to destroy a queue on.
  16053. * @wq: The queue structure associated with the queue to destroy.
  16054. *
16055. * This function destroys a queue, as detailed in @wq, by sending a mailbox
  16056. * command, specific to the type of queue, to the HBA.
  16057. *
  16058. * The @wq struct is used to get the queue ID of the queue to destroy.
  16059. *
  16060. * On success this function will return a zero. If the queue destroy mailbox
  16061. * command fails this function will return -ENXIO.
  16062. **/
  16063. int
  16064. lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
  16065. {
  16066. LPFC_MBOXQ_t *mbox;
  16067. int rc, length, status = 0;
  16068. uint32_t shdr_status, shdr_add_status;
  16069. union lpfc_sli4_cfg_shdr *shdr;
  16070. /* sanity check on queue memory */
  16071. if (!wq)
  16072. return -ENODEV;
  16073. if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
  16074. goto list_remove;
  16075. mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
  16076. if (!mbox)
  16077. return -ENOMEM;
  16078. length = (sizeof(struct lpfc_mbx_wq_destroy) -
  16079. sizeof(struct lpfc_sli4_cfg_mhdr));
  16080. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16081. LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
  16082. length, LPFC_SLI4_MBX_EMBED);
  16083. bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
  16084. wq->queue_id);
  16085. mbox->vport = wq->phba->pport;
  16086. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  16087. rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
  16088. shdr = (union lpfc_sli4_cfg_shdr *)
  16089. &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
  16090. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16091. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16092. if (shdr_status || shdr_add_status || rc) {
  16093. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16094. "2508 WQ_DESTROY mailbox failed with "
  16095. "status x%x add_status x%x, mbx status x%x\n",
  16096. shdr_status, shdr_add_status, rc);
  16097. status = -ENXIO;
  16098. }
  16099. mempool_free(mbox, wq->phba->mbox_mem_pool);
  16100. list_remove:
  16101. /* Remove wq from any list */
  16102. list_del_init(&wq->list);
  16103. kfree(wq->pring);
  16104. wq->pring = NULL;
  16105. return status;
  16106. }
  16107. /**
  16108. * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
  16109. * @phba: HBA structure that indicates port to destroy a queue on.
  16110. * @hrq: The queue structure associated with the queue to destroy.
  16111. * @drq: The queue structure associated with the queue to destroy.
  16112. *
16113. * This function destroys a receive queue pair, as detailed in @hrq and @drq, by sending a mailbox
  16114. * command, specific to the type of queue, to the HBA.
  16115. *
16116. * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
  16117. *
  16118. * On success this function will return a zero. If the queue destroy mailbox
  16119. * command fails this function will return -ENXIO.
  16120. **/
  16121. int
  16122. lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
  16123. struct lpfc_queue *drq)
  16124. {
  16125. LPFC_MBOXQ_t *mbox;
  16126. int rc, length, status = 0;
  16127. uint32_t shdr_status, shdr_add_status;
  16128. union lpfc_sli4_cfg_shdr *shdr;
  16129. /* sanity check on queue memory */
  16130. if (!hrq || !drq)
  16131. return -ENODEV;
  16132. if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
  16133. goto list_remove;
  16134. mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
  16135. if (!mbox)
  16136. return -ENOMEM;
  16137. length = (sizeof(struct lpfc_mbx_rq_destroy) -
  16138. sizeof(struct lpfc_sli4_cfg_mhdr));
  16139. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16140. LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
  16141. length, LPFC_SLI4_MBX_EMBED);
  16142. bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
  16143. hrq->queue_id);
  16144. mbox->vport = hrq->phba->pport;
  16145. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  16146. rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
  16147. /* The IOCTL status is embedded in the mailbox subheader. */
  16148. shdr = (union lpfc_sli4_cfg_shdr *)
  16149. &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
  16150. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16151. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16152. if (shdr_status || shdr_add_status || rc) {
  16153. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16154. "2509 RQ_DESTROY mailbox failed with "
  16155. "status x%x add_status x%x, mbx status x%x\n",
  16156. shdr_status, shdr_add_status, rc);
  16157. mempool_free(mbox, hrq->phba->mbox_mem_pool);
  16158. return -ENXIO;
  16159. }
  16160. bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
  16161. drq->queue_id);
  16162. rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
  16163. shdr = (union lpfc_sli4_cfg_shdr *)
  16164. &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
  16165. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16166. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16167. if (shdr_status || shdr_add_status || rc) {
  16168. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16169. "2510 RQ_DESTROY mailbox failed with "
  16170. "status x%x add_status x%x, mbx status x%x\n",
  16171. shdr_status, shdr_add_status, rc);
  16172. status = -ENXIO;
  16173. }
  16174. mempool_free(mbox, hrq->phba->mbox_mem_pool);
  16175. list_remove:
  16176. list_del_init(&hrq->list);
  16177. list_del_init(&drq->list);
  16178. return status;
  16179. }
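/*
 * Illustrative sketch, not part of the driver: the RQ create path above links
 * hrq/drq onto their CQ's child_list, so a plausible teardown order is the
 * leaf queues first, then the CQ, then the EQ.  The helper and all parameter
 * names are hypothetical; the block is never built.
 */
#if 0
static void example_teardown_channel(struct lpfc_hba *phba,
				     struct lpfc_queue *hrq,
				     struct lpfc_queue *drq,
				     struct lpfc_queue *cq,
				     struct lpfc_queue *eq)
{
	/* children before parents: RQ pair, then its CQ, then the EQ */
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}
#endif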
  16180. /**
  16181. * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16182. * @phba: The HBA for which this call is being executed.
  16183. * @pdma_phys_addr0: Physical address of the 1st SGL page.
  16184. * @pdma_phys_addr1: Physical address of the 2nd SGL page.
  16185. * @xritag: the xritag that ties this io to the SGL pages.
  16186. *
  16187. * This routine will post the sgl pages for the IO that has the xritag
  16188. * that is in the iocbq structure. The xritag is assigned during iocbq
  16189. * creation and persists for as long as the driver is loaded.
16190. * If the caller has fewer than 256 scatter gather segments to map, then
16191. * pdma_phys_addr1 should be 0.
16192. * If the caller needs to map more than 256 scatter gather segments, then
16193. * pdma_phys_addr1 should be a valid physical address.
16194. * Physical addresses for SGLs must be 64-byte aligned.
16195. * If two SGLs are mapped, the first one must have 256 entries and the
16196. * second can have between 1 and 256 entries.
  16197. *
  16198. * Return codes:
  16199. * 0 - Success
  16200. * -ENXIO, -ENOMEM - Failure
  16201. **/
  16202. int
  16203. lpfc_sli4_post_sgl(struct lpfc_hba *phba,
  16204. dma_addr_t pdma_phys_addr0,
  16205. dma_addr_t pdma_phys_addr1,
  16206. uint16_t xritag)
  16207. {
  16208. struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
  16209. LPFC_MBOXQ_t *mbox;
  16210. int rc;
  16211. uint32_t shdr_status, shdr_add_status;
  16212. uint32_t mbox_tmo;
  16213. union lpfc_sli4_cfg_shdr *shdr;
  16214. if (xritag == NO_XRI) {
  16215. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16216. "0364 Invalid param:\n");
  16217. return -EINVAL;
  16218. }
  16219. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  16220. if (!mbox)
  16221. return -ENOMEM;
  16222. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16223. LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
  16224. sizeof(struct lpfc_mbx_post_sgl_pages) -
  16225. sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
  16226. post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
  16227. &mbox->u.mqe.un.post_sgl_pages;
  16228. bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
  16229. bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
  16230. post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
  16231. cpu_to_le32(putPaddrLow(pdma_phys_addr0));
  16232. post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
  16233. cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
  16234. post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
  16235. cpu_to_le32(putPaddrLow(pdma_phys_addr1));
  16236. post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
  16237. cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
  16238. if (!phba->sli4_hba.intr_enable)
  16239. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16240. else {
  16241. mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  16242. rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  16243. }
  16244. /* The IOCTL status is embedded in the mailbox subheader. */
  16245. shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
  16246. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16247. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16248. if (!phba->sli4_hba.intr_enable)
  16249. mempool_free(mbox, phba->mbox_mem_pool);
  16250. else if (rc != MBX_TIMEOUT)
  16251. mempool_free(mbox, phba->mbox_mem_pool);
  16252. if (shdr_status || shdr_add_status || rc) {
  16253. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16254. "2511 POST_SGL mailbox failed with "
  16255. "status x%x add_status x%x, mbx status x%x\n",
  16256. shdr_status, shdr_add_status, rc);
  16257. }
  16258. return 0;
  16259. }
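/*
 * Illustrative sketch, not part of the driver: posting a single SGL page for
 * an XRI per the kernel-doc above -- with 256 or fewer scatter gather entries
 * the second page address is simply 0.  "sgl_phys" and the helper name are
 * hypothetical; the block is never built.
 */
#if 0
static int example_post_one_sgl(struct lpfc_hba *phba, dma_addr_t sgl_phys,
				uint16_t xritag)
{
	/* one 64-byte aligned SGL page, no second page */
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
#endif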
  16260. /**
16261. * lpfc_sli4_alloc_xri - Allocate an available xri from the device's range
  16262. * @phba: pointer to lpfc hba data structure.
  16263. *
16264. * This routine allocates the next free logical xri from the driver's
16265. * xri bitmask, consistent with the SLI-4 interface spec. Because the
16266. * index is logical, the search starts at bit 0 each time, and the
16267. * xri_used count is bumped for each successful allocation.
16268. *
16269. * Returns
16270. * A logical xri in the range 0 <= xri < max_xri if successful
16271. * NO_XRI (0xffff) if no xris are available.
  16272. **/
  16273. static uint16_t
  16274. lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
  16275. {
  16276. unsigned long xri;
  16277. /*
  16278. * Fetch the next logical xri. Because this index is logical,
  16279. * the driver starts at 0 each time.
  16280. */
  16281. spin_lock_irq(&phba->hbalock);
  16282. xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
  16283. phba->sli4_hba.max_cfg_param.max_xri);
  16284. if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
  16285. spin_unlock_irq(&phba->hbalock);
  16286. return NO_XRI;
  16287. } else {
  16288. set_bit(xri, phba->sli4_hba.xri_bmask);
  16289. phba->sli4_hba.max_cfg_param.xri_used++;
  16290. }
  16291. spin_unlock_irq(&phba->hbalock);
  16292. return xri;
  16293. }
  16294. /**
  16295. * __lpfc_sli4_free_xri - Release an xri for reuse.
  16296. * @phba: pointer to lpfc hba data structure.
  16297. * @xri: xri to release.
  16298. *
  16299. * This routine is invoked to release an xri to the pool of
16300. * available xris maintained by the driver.
  16301. **/
  16302. static void
  16303. __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
  16304. {
  16305. if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
  16306. phba->sli4_hba.max_cfg_param.xri_used--;
  16307. }
  16308. }
  16309. /**
  16310. * lpfc_sli4_free_xri - Release an xri for reuse.
  16311. * @phba: pointer to lpfc hba data structure.
  16312. * @xri: xri to release.
  16313. *
  16314. * This routine is invoked to release an xri to the pool of
16315. * available xris maintained by the driver.
  16316. **/
  16317. void
  16318. lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
  16319. {
  16320. spin_lock_irq(&phba->hbalock);
  16321. __lpfc_sli4_free_xri(phba, xri);
  16322. spin_unlock_irq(&phba->hbalock);
  16323. }
  16324. /**
  16325. * lpfc_sli4_next_xritag - Get an xritag for the io
  16326. * @phba: Pointer to HBA context object.
  16327. *
  16328. * This function gets an xritag for the iocb. If there is no unused xritag
  16329. * it will return 0xffff.
16330. * The function returns the allocated xritag if successful, else NO_XRI
16331. * (0xffff); NO_XRI is not a valid xritag.
  16332. * The caller is not required to hold any lock.
  16333. **/
  16334. uint16_t
  16335. lpfc_sli4_next_xritag(struct lpfc_hba *phba)
  16336. {
  16337. uint16_t xri_index;
  16338. xri_index = lpfc_sli4_alloc_xri(phba);
  16339. if (xri_index == NO_XRI)
  16340. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  16341. "2004 Failed to allocate XRI.last XRITAG is %d"
  16342. " Max XRI is %d, Used XRI is %d\n",
  16343. xri_index,
  16344. phba->sli4_hba.max_cfg_param.max_xri,
  16345. phba->sli4_hba.max_cfg_param.xri_used);
  16346. return xri_index;
  16347. }
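/*
 * Illustrative sketch, not part of the driver: the xri bitmask allocator
 * above pairs with lpfc_sli4_free_xri(), and NO_XRI (0xffff) signals
 * exhaustion.  The helper name is hypothetical; the block is never built.
 */
#if 0
static int example_with_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	xri = lpfc_sli4_next_xritag(phba);
	if (xri == NO_XRI)
		return -ENOMEM;		/* bitmask exhausted */

	/* ... use the xri for an exchange ... */

	lpfc_sli4_free_xri(phba, xri);	/* clear the bit, drop xri_used */
	return 0;
}
#endif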
  16348. /**
  16349. * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
  16350. * @phba: pointer to lpfc hba data structure.
  16351. * @post_sgl_list: pointer to els sgl entry list.
  16352. * @post_cnt: number of els sgl entries on the list.
  16353. *
  16354. * This routine is invoked to post a block of driver's sgl pages to the
  16355. * HBA using non-embedded mailbox command. No Lock is held. This routine
  16356. * is only called when the driver is loading and after all IO has been
  16357. * stopped.
  16358. **/
  16359. static int
  16360. lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
  16361. struct list_head *post_sgl_list,
  16362. int post_cnt)
  16363. {
  16364. struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
  16365. struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
  16366. struct sgl_page_pairs *sgl_pg_pairs;
  16367. void *viraddr;
  16368. LPFC_MBOXQ_t *mbox;
  16369. uint32_t reqlen, alloclen, pg_pairs;
  16370. uint32_t mbox_tmo;
  16371. uint16_t xritag_start = 0;
  16372. int rc = 0;
  16373. uint32_t shdr_status, shdr_add_status;
  16374. union lpfc_sli4_cfg_shdr *shdr;
  16375. reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
  16376. sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
  16377. if (reqlen > SLI4_PAGE_SIZE) {
  16378. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16379. "2559 Block sgl registration required DMA "
  16380. "size (%d) great than a page\n", reqlen);
  16381. return -ENOMEM;
  16382. }
  16383. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  16384. if (!mbox)
  16385. return -ENOMEM;
  16386. /* Allocate DMA memory and set up the non-embedded mailbox command */
  16387. alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16388. LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
  16389. LPFC_SLI4_MBX_NEMBED);
  16390. if (alloclen < reqlen) {
  16391. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16392. "0285 Allocated DMA memory size (%d) is "
  16393. "less than the requested DMA memory "
  16394. "size (%d)\n", alloclen, reqlen);
  16395. lpfc_sli4_mbox_cmd_free(phba, mbox);
  16396. return -ENOMEM;
  16397. }
  16398. /* Set up the SGL pages in the non-embedded DMA pages */
  16399. viraddr = mbox->sge_array->addr[0];
  16400. sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
  16401. sgl_pg_pairs = &sgl->sgl_pg_pairs;
  16402. pg_pairs = 0;
  16403. list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
  16404. /* Set up the sge entry */
  16405. sgl_pg_pairs->sgl_pg0_addr_lo =
  16406. cpu_to_le32(putPaddrLow(sglq_entry->phys));
  16407. sgl_pg_pairs->sgl_pg0_addr_hi =
  16408. cpu_to_le32(putPaddrHigh(sglq_entry->phys));
  16409. sgl_pg_pairs->sgl_pg1_addr_lo =
  16410. cpu_to_le32(putPaddrLow(0));
  16411. sgl_pg_pairs->sgl_pg1_addr_hi =
  16412. cpu_to_le32(putPaddrHigh(0));
  16413. /* Keep the first xritag on the list */
  16414. if (pg_pairs == 0)
  16415. xritag_start = sglq_entry->sli4_xritag;
  16416. sgl_pg_pairs++;
  16417. pg_pairs++;
  16418. }
  16419. /* Complete initialization and perform endian conversion. */
  16420. bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
  16421. bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
  16422. sgl->word0 = cpu_to_le32(sgl->word0);
  16423. if (!phba->sli4_hba.intr_enable)
  16424. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16425. else {
  16426. mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  16427. rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  16428. }
  16429. shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
  16430. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16431. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16432. if (!phba->sli4_hba.intr_enable)
  16433. lpfc_sli4_mbox_cmd_free(phba, mbox);
  16434. else if (rc != MBX_TIMEOUT)
  16435. lpfc_sli4_mbox_cmd_free(phba, mbox);
  16436. if (shdr_status || shdr_add_status || rc) {
  16437. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16438. "2513 POST_SGL_BLOCK mailbox command failed "
  16439. "status x%x add_status x%x mbx status x%x\n",
  16440. shdr_status, shdr_add_status, rc);
  16441. rc = -ENXIO;
  16442. }
  16443. return rc;
  16444. }
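/*
 * Illustrative sketch, not part of the driver: the same request-length
 * arithmetic used above, pulled out so the one-SLI4-page limit on a
 * non-embedded SGL block post is easy to see.  The helper name is
 * hypothetical; the block is never built.
 */
#if 0
static bool example_sgl_block_fits(int post_cnt)
{
	uint32_t reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
			  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);

	return reqlen <= SLI4_PAGE_SIZE;
}
#endif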
  16445. /**
  16446. * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
  16447. * @phba: pointer to lpfc hba data structure.
  16448. * @nblist: pointer to nvme buffer list.
  16449. * @count: number of scsi buffers on the list.
  16450. *
  16451. * This routine is invoked to post a block of @count scsi sgl pages from a
  16452. * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
  16453. * No Lock is held.
  16454. *
  16455. **/
  16456. static int
  16457. lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
  16458. int count)
  16459. {
  16460. struct lpfc_io_buf *lpfc_ncmd;
  16461. struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
  16462. struct sgl_page_pairs *sgl_pg_pairs;
  16463. void *viraddr;
  16464. LPFC_MBOXQ_t *mbox;
  16465. uint32_t reqlen, alloclen, pg_pairs;
  16466. uint32_t mbox_tmo;
  16467. uint16_t xritag_start = 0;
  16468. int rc = 0;
  16469. uint32_t shdr_status, shdr_add_status;
  16470. dma_addr_t pdma_phys_bpl1;
  16471. union lpfc_sli4_cfg_shdr *shdr;
  16472. /* Calculate the requested length of the dma memory */
  16473. reqlen = count * sizeof(struct sgl_page_pairs) +
  16474. sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
  16475. if (reqlen > SLI4_PAGE_SIZE) {
  16476. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  16477. "6118 Block sgl registration required DMA "
  16478. "size (%d) great than a page\n", reqlen);
  16479. return -ENOMEM;
  16480. }
  16481. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  16482. if (!mbox) {
  16483. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16484. "6119 Failed to allocate mbox cmd memory\n");
  16485. return -ENOMEM;
  16486. }
  16487. /* Allocate DMA memory and set up the non-embedded mailbox command */
  16488. alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  16489. LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
  16490. reqlen, LPFC_SLI4_MBX_NEMBED);
  16491. if (alloclen < reqlen) {
  16492. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16493. "6120 Allocated DMA memory size (%d) is "
  16494. "less than the requested DMA memory "
  16495. "size (%d)\n", alloclen, reqlen);
  16496. lpfc_sli4_mbox_cmd_free(phba, mbox);
  16497. return -ENOMEM;
  16498. }
  16499. /* Get the first SGE entry from the non-embedded DMA memory */
  16500. viraddr = mbox->sge_array->addr[0];
  16501. /* Set up the SGL pages in the non-embedded DMA pages */
  16502. sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
  16503. sgl_pg_pairs = &sgl->sgl_pg_pairs;
  16504. pg_pairs = 0;
  16505. list_for_each_entry(lpfc_ncmd, nblist, list) {
  16506. /* Set up the sge entry */
  16507. sgl_pg_pairs->sgl_pg0_addr_lo =
  16508. cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
  16509. sgl_pg_pairs->sgl_pg0_addr_hi =
  16510. cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
  16511. if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
  16512. pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
  16513. SGL_PAGE_SIZE;
  16514. else
  16515. pdma_phys_bpl1 = 0;
  16516. sgl_pg_pairs->sgl_pg1_addr_lo =
  16517. cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
  16518. sgl_pg_pairs->sgl_pg1_addr_hi =
  16519. cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
  16520. /* Keep the first xritag on the list */
  16521. if (pg_pairs == 0)
  16522. xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
  16523. sgl_pg_pairs++;
  16524. pg_pairs++;
  16525. }
  16526. bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
  16527. bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
  16528. /* Perform endian conversion if necessary */
  16529. sgl->word0 = cpu_to_le32(sgl->word0);
  16530. if (!phba->sli4_hba.intr_enable) {
  16531. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  16532. } else {
  16533. mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  16534. rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  16535. }
  16536. shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
  16537. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  16538. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  16539. if (!phba->sli4_hba.intr_enable)
  16540. lpfc_sli4_mbox_cmd_free(phba, mbox);
  16541. else if (rc != MBX_TIMEOUT)
  16542. lpfc_sli4_mbox_cmd_free(phba, mbox);
  16543. if (shdr_status || shdr_add_status || rc) {
  16544. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  16545. "6125 POST_SGL_BLOCK mailbox command failed "
  16546. "status x%x add_status x%x mbx status x%x\n",
  16547. shdr_status, shdr_add_status, rc);
  16548. rc = -ENXIO;
  16549. }
  16550. return rc;
  16551. }
  16552. /**
  16553. * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
  16554. * @phba: pointer to lpfc hba data structure.
  16555. * @post_nblist: pointer to the nvme buffer list.
  16556. * @sb_count: number of nvme buffers.
  16557. *
  16558. * This routine walks a list of nvme buffers that was passed in. It attempts
  16559. * to construct blocks of nvme buffer sgls which contains contiguous xris and
  16560. * uses the non-embedded SGL block post mailbox commands to post to the port.
16561. * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
16562. * the embedded SGL post mailbox command for posting. The @post_nblist passed
16563. * in must be a local list, thus no lock is needed when manipulating the list.
  16564. *
16565. * Returns: 0 on failure, otherwise the number of successfully posted buffers.
  16566. **/
  16567. int
  16568. lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
  16569. struct list_head *post_nblist, int sb_count)
  16570. {
  16571. struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
  16572. int status, sgl_size;
  16573. int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
  16574. dma_addr_t pdma_phys_sgl1;
  16575. int last_xritag = NO_XRI;
  16576. int cur_xritag;
  16577. LIST_HEAD(prep_nblist);
  16578. LIST_HEAD(blck_nblist);
  16579. LIST_HEAD(nvme_nblist);
  16580. /* sanity check */
  16581. if (sb_count <= 0)
  16582. return -EINVAL;
  16583. sgl_size = phba->cfg_sg_dma_buf_size;
  16584. list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
  16585. list_del_init(&lpfc_ncmd->list);
  16586. block_cnt++;
  16587. if ((last_xritag != NO_XRI) &&
  16588. (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
  16589. /* a hole in xri block, form a sgl posting block */
  16590. list_splice_init(&prep_nblist, &blck_nblist);
  16591. post_cnt = block_cnt - 1;
  16592. /* prepare list for next posting block */
  16593. list_add_tail(&lpfc_ncmd->list, &prep_nblist);
  16594. block_cnt = 1;
  16595. } else {
  16596. /* prepare list for next posting block */
  16597. list_add_tail(&lpfc_ncmd->list, &prep_nblist);
  16598. /* enough sgls for non-embed sgl mbox command */
  16599. if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
  16600. list_splice_init(&prep_nblist, &blck_nblist);
  16601. post_cnt = block_cnt;
  16602. block_cnt = 0;
  16603. }
  16604. }
  16605. num_posting++;
  16606. last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
  16607. /* end of repost sgl list condition for NVME buffers */
  16608. if (num_posting == sb_count) {
  16609. if (post_cnt == 0) {
  16610. /* last sgl posting block */
  16611. list_splice_init(&prep_nblist, &blck_nblist);
  16612. post_cnt = block_cnt;
  16613. } else if (block_cnt == 1) {
  16614. /* last single sgl with non-contiguous xri */
  16615. if (sgl_size > SGL_PAGE_SIZE)
  16616. pdma_phys_sgl1 =
  16617. lpfc_ncmd->dma_phys_sgl +
  16618. SGL_PAGE_SIZE;
  16619. else
  16620. pdma_phys_sgl1 = 0;
  16621. cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
  16622. status = lpfc_sli4_post_sgl(
  16623. phba, lpfc_ncmd->dma_phys_sgl,
  16624. pdma_phys_sgl1, cur_xritag);
  16625. if (status) {
  16626. /* Post error. Buffer unavailable. */
  16627. lpfc_ncmd->flags |=
  16628. LPFC_SBUF_NOT_POSTED;
  16629. } else {
16630. /* Post success. Buffer available. */
  16631. lpfc_ncmd->flags &=
  16632. ~LPFC_SBUF_NOT_POSTED;
  16633. lpfc_ncmd->status = IOSTAT_SUCCESS;
  16634. num_posted++;
  16635. }
  16636. /* success, put on NVME buffer sgl list */
  16637. list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
  16638. }
  16639. }
  16640. /* continue until a nembed page worth of sgls */
  16641. if (post_cnt == 0)
  16642. continue;
  16643. /* post block of NVME buffer list sgls */
  16644. status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
  16645. post_cnt);
16646. /* don't reset xritag due to hole in xri block */
  16647. if (block_cnt == 0)
  16648. last_xritag = NO_XRI;
  16649. /* reset NVME buffer post count for next round of posting */
  16650. post_cnt = 0;
  16651. /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
  16652. while (!list_empty(&blck_nblist)) {
  16653. list_remove_head(&blck_nblist, lpfc_ncmd,
  16654. struct lpfc_io_buf, list);
  16655. if (status) {
  16656. /* Post error. Mark buffer unavailable. */
  16657. lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
  16658. } else {
  16659. /* Post success, Mark buffer available. */
  16660. lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
  16661. lpfc_ncmd->status = IOSTAT_SUCCESS;
  16662. num_posted++;
  16663. }
  16664. list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
  16665. }
  16666. }
  16667. /* Push NVME buffers with sgl posted to the available list */
  16668. lpfc_io_buf_replenish(phba, &nvme_nblist);
  16669. return num_posted;
  16670. }
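/*
 * Illustrative sketch, not part of the driver: the rule used above for
 * cutting buffers into non-embedded SGL block posts -- a block ends when the
 * next xri is not contiguous with the previous one, or when
 * LPFC_NEMBED_MBOX_SGL_CNT entries have accumulated.  The helper name is
 * hypothetical; the block is never built.
 */
#if 0
static bool example_cut_block_here(int last_xritag, int cur_xritag,
				   int block_cnt)
{
	if (last_xritag != NO_XRI && cur_xritag != last_xritag + 1)
		return true;				/* hole in the xri run */
	return block_cnt == LPFC_NEMBED_MBOX_SGL_CNT;	/* page worth of sgls */
}
#endif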
  16671. /**
  16672. * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
  16673. * @phba: pointer to lpfc_hba struct that the frame was received on
  16674. * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
  16675. *
  16676. * This function checks the fields in the @fc_hdr to see if the FC frame is a
  16677. * valid type of frame that the LPFC driver will handle. This function will
  16678. * return a zero if the frame is a valid frame or a non zero value when the
  16679. * frame does not pass the check.
  16680. **/
  16681. static int
  16682. lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
  16683. {
  16684. /* make rctl_names static to save stack space */
  16685. struct fc_vft_header *fc_vft_hdr;
  16686. struct fc_app_header *fc_app_hdr;
  16687. uint32_t *header = (uint32_t *) fc_hdr;
  16688. #define FC_RCTL_MDS_DIAGS 0xF4
  16689. switch (fc_hdr->fh_r_ctl) {
  16690. case FC_RCTL_DD_UNCAT: /* uncategorized information */
  16691. case FC_RCTL_DD_SOL_DATA: /* solicited data */
  16692. case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
  16693. case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
  16694. case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
  16695. case FC_RCTL_DD_DATA_DESC: /* data descriptor */
  16696. case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
  16697. case FC_RCTL_DD_CMD_STATUS: /* command status */
  16698. case FC_RCTL_ELS_REQ: /* extended link services request */
  16699. case FC_RCTL_ELS_REP: /* extended link services reply */
  16700. case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
  16701. case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
  16702. case FC_RCTL_BA_ABTS: /* basic link service abort */
  16703. case FC_RCTL_BA_RMC: /* remove connection */
  16704. case FC_RCTL_BA_ACC: /* basic accept */
  16705. case FC_RCTL_BA_RJT: /* basic reject */
  16706. case FC_RCTL_BA_PRMT:
  16707. case FC_RCTL_ACK_1: /* acknowledge_1 */
  16708. case FC_RCTL_ACK_0: /* acknowledge_0 */
  16709. case FC_RCTL_P_RJT: /* port reject */
  16710. case FC_RCTL_F_RJT: /* fabric reject */
  16711. case FC_RCTL_P_BSY: /* port busy */
  16712. case FC_RCTL_F_BSY: /* fabric busy to data frame */
  16713. case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
  16714. case FC_RCTL_LCR: /* link credit reset */
  16715. case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
  16716. case FC_RCTL_END: /* end */
  16717. break;
  16718. case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
  16719. fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
  16720. fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
  16721. return lpfc_fc_frame_check(phba, fc_hdr);
  16722. case FC_RCTL_BA_NOP: /* basic link service NOP */
  16723. default:
  16724. goto drop;
  16725. }
  16726. switch (fc_hdr->fh_type) {
  16727. case FC_TYPE_BLS:
  16728. case FC_TYPE_ELS:
  16729. case FC_TYPE_FCP:
  16730. case FC_TYPE_CT:
  16731. case FC_TYPE_NVME:
  16732. break;
  16733. case FC_TYPE_IP:
  16734. case FC_TYPE_ILS:
  16735. default:
  16736. goto drop;
  16737. }
  16738. if (unlikely(phba->link_flag == LS_LOOPBACK_MODE &&
  16739. phba->cfg_vmid_app_header)) {
  16740. /* Application header is 16B device header */
  16741. if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) {
  16742. fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1);
  16743. if (be32_to_cpu(fc_app_hdr->src_app_id) !=
  16744. LOOPBACK_SRC_APPID) {
  16745. lpfc_printf_log(phba, KERN_WARNING,
  16746. LOG_ELS | LOG_LIBDFC,
  16747. "1932 Loopback src app id "
  16748. "not matched, app_id:x%x\n",
  16749. be32_to_cpu(fc_app_hdr->src_app_id));
  16750. goto drop;
  16751. }
  16752. } else {
  16753. lpfc_printf_log(phba, KERN_WARNING,
  16754. LOG_ELS | LOG_LIBDFC,
  16755. "1933 Loopback df_ctl bit not set, "
  16756. "df_ctl:x%x\n",
  16757. fc_hdr->fh_df_ctl);
  16758. goto drop;
  16759. }
  16760. }
  16761. lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  16762. "2538 Received frame rctl:x%x, type:x%x, "
  16763. "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
  16764. fc_hdr->fh_r_ctl, fc_hdr->fh_type,
  16765. be32_to_cpu(header[0]), be32_to_cpu(header[1]),
  16766. be32_to_cpu(header[2]), be32_to_cpu(header[3]),
  16767. be32_to_cpu(header[4]), be32_to_cpu(header[5]),
  16768. be32_to_cpu(header[6]));
  16769. return 0;
  16770. drop:
  16771. lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
  16772. "2539 Dropped frame rctl:x%x type:x%x\n",
  16773. fc_hdr->fh_r_ctl, fc_hdr->fh_type);
  16774. return 1;
  16775. }
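/*
 * Illustrative sketch, not part of the driver: a minimal header with an R_CTL
 * and TYPE from the lists above passes lpfc_fc_frame_check() (returns 0);
 * anything not listed is dropped.  The helper name and stack buffer are
 * hypothetical; the block is never built.
 */
#if 0
static int example_check_els_frame(struct lpfc_hba *phba)
{
	/* room for the 24-byte header plus the extra word the trace dumps */
	u8 raw[32] = { 0 };
	struct fc_frame_header *hdr = (struct fc_frame_header *)raw;

	hdr->fh_r_ctl = FC_RCTL_ELS_REQ;	/* extended link services request */
	hdr->fh_type = FC_TYPE_ELS;

	return lpfc_fc_frame_check(phba, hdr);	/* 0 == frame would be handled */
}
#endif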
  16776. /**
  16777. * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
  16778. * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
  16779. *
  16780. * This function processes the FC header to retrieve the VFI from the VF
  16781. * header, if one exists. This function will return the VFI if one exists
  16782. * or 0 if no VSAN Header exists.
  16783. **/
  16784. static uint32_t
  16785. lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
  16786. {
  16787. struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
  16788. if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
  16789. return 0;
  16790. return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
  16791. }
  16792. /**
  16793. * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
  16794. * @phba: Pointer to the HBA structure to search for the vport on
  16795. * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
  16796. * @fcfi: The FC Fabric ID that the frame came from
  16797. * @did: Destination ID to match against
  16798. *
  16799. * This function searches the @phba for a vport that matches the content of the
  16800. * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
  16801. * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
  16802. * returns the matching vport pointer or NULL if unable to match frame to a
  16803. * vport.
  16804. **/
  16805. static struct lpfc_vport *
  16806. lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
  16807. uint16_t fcfi, uint32_t did)
  16808. {
  16809. struct lpfc_vport **vports;
  16810. struct lpfc_vport *vport = NULL;
  16811. int i;
  16812. if (did == Fabric_DID)
  16813. return phba->pport;
  16814. if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
  16815. phba->link_state != LPFC_HBA_READY)
  16816. return phba->pport;
  16817. vports = lpfc_create_vport_work_array(phba);
  16818. if (vports != NULL) {
  16819. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  16820. if (phba->fcf.fcfi == fcfi &&
  16821. vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
  16822. vports[i]->fc_myDID == did) {
  16823. vport = vports[i];
  16824. break;
  16825. }
  16826. }
  16827. }
  16828. lpfc_destroy_vport_work_array(phba, vports);
  16829. return vport;
  16830. }
  16831. /**
  16832. * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
  16833. * @vport: The vport to work on.
  16834. *
  16835. * This function updates the receive sequence time stamp for this vport. The
16836. * receive sequence time stamp indicates the time that the last frame of
16837. * the sequence that has been idle for the longest amount of time was received.
16838. * The driver uses this time stamp to indicate if any received sequences have
  16839. * timed out.
  16840. **/
  16841. static void
  16842. lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
  16843. {
  16844. struct lpfc_dmabuf *h_buf;
  16845. struct hbq_dmabuf *dmabuf = NULL;
  16846. /* get the oldest sequence on the rcv list */
  16847. h_buf = list_get_first(&vport->rcv_buffer_list,
  16848. struct lpfc_dmabuf, list);
  16849. if (!h_buf)
  16850. return;
  16851. dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  16852. vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
  16853. }
  16854. /**
  16855. * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
  16856. * @vport: The vport that the received sequences were sent to.
  16857. *
  16858. * This function cleans up all outstanding received sequences. This is called
  16859. * by the driver when a link event or user action invalidates all the received
  16860. * sequences.
  16861. **/
  16862. void
  16863. lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
  16864. {
  16865. struct lpfc_dmabuf *h_buf, *hnext;
  16866. struct lpfc_dmabuf *d_buf, *dnext;
  16867. struct hbq_dmabuf *dmabuf = NULL;
  16868. /* start with the oldest sequence on the rcv list */
  16869. list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
  16870. dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  16871. list_del_init(&dmabuf->hbuf.list);
  16872. list_for_each_entry_safe(d_buf, dnext,
  16873. &dmabuf->dbuf.list, list) {
  16874. list_del_init(&d_buf->list);
  16875. lpfc_in_buf_free(vport->phba, d_buf);
  16876. }
  16877. lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
  16878. }
  16879. }
  16880. /**
  16881. * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
  16882. * @vport: The vport that the received sequences were sent to.
  16883. *
  16884. * This function determines whether any received sequences have timed out by
  16885. * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
  16886. * indicates that there is at least one timed out sequence this routine will
  16887. * go through the received sequences one at a time from most inactive to most
  16888. * active to determine which ones need to be cleaned up. Once it has determined
  16889. * that a sequence needs to be cleaned up it will simply free up the resources
  16890. * without sending an abort.
  16891. **/
  16892. void
  16893. lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
  16894. {
  16895. struct lpfc_dmabuf *h_buf, *hnext;
  16896. struct lpfc_dmabuf *d_buf, *dnext;
  16897. struct hbq_dmabuf *dmabuf = NULL;
  16898. unsigned long timeout;
  16899. int abort_count = 0;
  16900. timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
  16901. vport->rcv_buffer_time_stamp);
  16902. if (list_empty(&vport->rcv_buffer_list) ||
  16903. time_before(jiffies, timeout))
  16904. return;
  16905. /* start with the oldest sequence on the rcv list */
  16906. list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
  16907. dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  16908. timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
  16909. dmabuf->time_stamp);
  16910. if (time_before(jiffies, timeout))
  16911. break;
  16912. abort_count++;
  16913. list_del_init(&dmabuf->hbuf.list);
  16914. list_for_each_entry_safe(d_buf, dnext,
  16915. &dmabuf->dbuf.list, list) {
  16916. list_del_init(&d_buf->list);
  16917. lpfc_in_buf_free(vport->phba, d_buf);
  16918. }
  16919. lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
  16920. }
  16921. if (abort_count)
  16922. lpfc_update_rcv_time_stamp(vport);
  16923. }
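/*
 * Illustrative sketch, not part of the driver: the E_D_TOV aging test used in
 * lpfc_rcv_seq_check_edtov() above, stated on its own -- a sequence is stale
 * once fc_edtov milliseconds have passed since its last frame was stamped.
 * The helper name is hypothetical; the block is never built.
 */
#if 0
static bool example_seq_timed_out(struct lpfc_vport *vport,
				  struct hbq_dmabuf *seq)
{
	unsigned long timeout = msecs_to_jiffies(vport->phba->fc_edtov) +
				seq->time_stamp;

	return time_after_eq(jiffies, timeout);
}
#endif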
  16924. /**
  16925. * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
16926. * @vport: pointer to a virtual port
  16927. * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
  16928. *
  16929. * This function searches through the existing incomplete sequences that have
  16930. * been sent to this @vport. If the frame matches one of the incomplete
  16931. * sequences then the dbuf in the @dmabuf is added to the list of frames that
  16932. * make up that sequence. If no sequence is found that matches this frame then
16933. * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
  16934. * This function returns a pointer to the first dmabuf in the sequence list that
  16935. * the frame was linked to.
  16936. **/
  16937. static struct hbq_dmabuf *
  16938. lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
  16939. {
  16940. struct fc_frame_header *new_hdr;
  16941. struct fc_frame_header *temp_hdr;
  16942. struct lpfc_dmabuf *d_buf;
  16943. struct lpfc_dmabuf *h_buf;
  16944. struct hbq_dmabuf *seq_dmabuf = NULL;
  16945. struct hbq_dmabuf *temp_dmabuf = NULL;
  16946. uint8_t found = 0;
  16947. INIT_LIST_HEAD(&dmabuf->dbuf.list);
  16948. dmabuf->time_stamp = jiffies;
  16949. new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
  16950. /* Use the hdr_buf to find the sequence that this frame belongs to */
  16951. list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
  16952. temp_hdr = (struct fc_frame_header *)h_buf->virt;
  16953. if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
  16954. (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
  16955. (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
  16956. continue;
  16957. /* found a pending sequence that matches this frame */
  16958. seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  16959. break;
  16960. }
  16961. if (!seq_dmabuf) {
  16962. /*
  16963. * This indicates first frame received for this sequence.
  16964. * Queue the buffer on the vport's rcv_buffer_list.
  16965. */
  16966. list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
  16967. lpfc_update_rcv_time_stamp(vport);
  16968. return dmabuf;
  16969. }
  16970. temp_hdr = seq_dmabuf->hbuf.virt;
  16971. if (be16_to_cpu(new_hdr->fh_seq_cnt) <
  16972. be16_to_cpu(temp_hdr->fh_seq_cnt)) {
  16973. list_del_init(&seq_dmabuf->hbuf.list);
  16974. list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
  16975. list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
  16976. lpfc_update_rcv_time_stamp(vport);
  16977. return dmabuf;
  16978. }
  16979. /* move this sequence to the tail to indicate a young sequence */
  16980. list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
  16981. seq_dmabuf->time_stamp = jiffies;
  16982. lpfc_update_rcv_time_stamp(vport);
  16983. if (list_empty(&seq_dmabuf->dbuf.list)) {
  16984. list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
  16985. return seq_dmabuf;
  16986. }
  16987. /* find the correct place in the sequence to insert this frame */
  16988. d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
  16989. while (!found) {
  16990. temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  16991. temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
  16992. /*
  16993. * If the frame's sequence count is greater than the frame on
  16994. * the list then insert the frame right after this frame
  16995. */
  16996. if (be16_to_cpu(new_hdr->fh_seq_cnt) >
  16997. be16_to_cpu(temp_hdr->fh_seq_cnt)) {
  16998. list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
  16999. found = 1;
  17000. break;
  17001. }
  17002. if (&d_buf->list == &seq_dmabuf->dbuf.list)
  17003. break;
  17004. d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
  17005. }
  17006. if (found)
  17007. return seq_dmabuf;
  17008. return NULL;
  17009. }
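/*
 * Illustrative sketch, not part of the driver: lpfc_fc_frame_add() above and
 * lpfc_sli4_abort_partial_seq() below both match a frame to a pending
 * sequence on the (SEQ_ID, OX_ID, S_ID) triple; this helper states that key
 * on its own.  The helper name is hypothetical; the block is never built.
 */
#if 0
static bool example_same_sequence(struct fc_frame_header *a,
				  struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}
#endif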
  17010. /**
  17011. * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17012. * @vport: pointer to a virtual port
  17013. * @dmabuf: pointer to a dmabuf that describes the FC sequence
  17014. *
17015. * This function tries to abort the partially assembled sequence described
17016. * by the information from the basic abort @dmabuf. It checks to see whether
17017. * such a partially assembled sequence is held by the driver. If so, it shall free up all
  17018. * the frames from the partially assembled sequence.
  17019. *
  17020. * Return
  17021. * true -- if there is matching partially assembled sequence present and all
  17022. * the frames freed with the sequence;
  17023. * false -- if there is no matching partially assembled sequence present so
  17024. * nothing got aborted in the lower layer driver
  17025. **/
  17026. static bool
  17027. lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
  17028. struct hbq_dmabuf *dmabuf)
  17029. {
  17030. struct fc_frame_header *new_hdr;
  17031. struct fc_frame_header *temp_hdr;
  17032. struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
  17033. struct hbq_dmabuf *seq_dmabuf = NULL;
  17034. /* Use the hdr_buf to find the sequence that matches this frame */
  17035. INIT_LIST_HEAD(&dmabuf->dbuf.list);
  17036. INIT_LIST_HEAD(&dmabuf->hbuf.list);
  17037. new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
  17038. list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
  17039. temp_hdr = (struct fc_frame_header *)h_buf->virt;
  17040. if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
  17041. (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
  17042. (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
  17043. continue;
  17044. /* found a pending sequence that matches this frame */
  17045. seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
  17046. break;
  17047. }
  17048. /* Free up all the frames from the partially assembled sequence */
  17049. if (seq_dmabuf) {
  17050. list_for_each_entry_safe(d_buf, n_buf,
  17051. &seq_dmabuf->dbuf.list, list) {
  17052. list_del_init(&d_buf->list);
  17053. lpfc_in_buf_free(vport->phba, d_buf);
  17054. }
  17055. return true;
  17056. }
  17057. return false;
  17058. }
  17059. /**
  17060. * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17061. * @vport: pointer to a virtual port
  17062. * @dmabuf: pointer to a dmabuf that describes the FC sequence
  17063. *
17064. * This function tries to abort the assembled sequence at the upper level
17065. * protocol, described by the information from the basic abort @dmabuf. It
17066. * checks to see whether such a pending context exists at the upper level protocol.
  17067. * If so, it shall clean up the pending context.
  17068. *
  17069. * Return
  17070. * true -- if there is matching pending context of the sequence cleaned
  17071. * at ulp;
  17072. * false -- if there is no matching pending context of the sequence present
  17073. * at ulp.
  17074. **/
  17075. static bool
  17076. lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
  17077. {
  17078. struct lpfc_hba *phba = vport->phba;
  17079. int handled;
  17080. /* Accepting abort at ulp with SLI4 only */
  17081. if (phba->sli_rev < LPFC_SLI_REV4)
  17082. return false;
  17083. /* Register all caring upper level protocols to attend abort */
  17084. handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
  17085. if (handled)
  17086. return true;
  17087. return false;
  17088. }
  17089. /**
  17090. * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
  17091. * @phba: Pointer to HBA context object.
  17092. * @cmd_iocbq: pointer to the command iocbq structure.
  17093. * @rsp_iocbq: pointer to the response iocbq structure.
  17094. *
  17095. * This function handles the sequence abort response iocb command complete
  17096. * event. It properly releases the memory allocated to the sequence abort
  17097. * accept iocb.
  17098. **/
  17099. static void
  17100. lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
  17101. struct lpfc_iocbq *cmd_iocbq,
  17102. struct lpfc_iocbq *rsp_iocbq)
  17103. {
  17104. if (cmd_iocbq) {
  17105. lpfc_nlp_put(cmd_iocbq->ndlp);
  17106. lpfc_sli_release_iocbq(phba, cmd_iocbq);
  17107. }
17108. /* Failure means BLS ABORT RSP did not get delivered to remote node */
  17109. if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
  17110. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17111. "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
  17112. get_job_ulpstatus(phba, rsp_iocbq),
  17113. get_job_word4(phba, rsp_iocbq));
  17114. }
  17115. /**
  17116. * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
  17117. * @phba: Pointer to HBA context object.
  17118. * @xri: xri id in transaction.
  17119. *
17120. * This function validates that the xri maps to the known range of XRIs allocated and
  17121. * used by the driver.
  17122. **/
  17123. uint16_t
  17124. lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
  17125. uint16_t xri)
  17126. {
  17127. uint16_t i;
  17128. for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
  17129. if (xri == phba->sli4_hba.xri_ids[i])
  17130. return i;
  17131. }
  17132. return NO_XRI;
  17133. }
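/*
 * Illustrative sketch, not part of the driver: translating an xri seen on the
 * wire into the driver's logical index, as the BLS abort response path below
 * does before arming the RRQ.  The helper name is hypothetical; the block is
 * never built.
 */
#if 0
static bool example_xri_known(struct lpfc_hba *phba, uint16_t wire_xri)
{
	return lpfc_sli4_xri_inrange(phba, wire_xri) != NO_XRI;
}
#endif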
  17134. /**
  17135. * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
  17136. * @vport: pointer to a virtual port.
  17137. * @fc_hdr: pointer to a FC frame header.
  17138. * @aborted: was the partially assembled receive sequence successfully aborted
  17139. *
  17140. * This function sends a basic response to a previous unsol sequence abort
  17141. * event after aborting the sequence handling.
  17142. **/
  17143. void
  17144. lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
  17145. struct fc_frame_header *fc_hdr, bool aborted)
  17146. {
  17147. struct lpfc_hba *phba = vport->phba;
  17148. struct lpfc_iocbq *ctiocb = NULL;
  17149. struct lpfc_nodelist *ndlp;
  17150. uint16_t oxid, rxid, xri, lxri;
  17151. uint32_t sid, fctl;
  17152. union lpfc_wqe128 *icmd;
  17153. int rc;
  17154. if (!lpfc_is_link_up(phba))
  17155. return;
  17156. sid = sli4_sid_from_fc_hdr(fc_hdr);
  17157. oxid = be16_to_cpu(fc_hdr->fh_ox_id);
  17158. rxid = be16_to_cpu(fc_hdr->fh_rx_id);
  17159. ndlp = lpfc_findnode_did(vport, sid);
  17160. if (!ndlp) {
  17161. ndlp = lpfc_nlp_init(vport, sid);
  17162. if (!ndlp) {
  17163. lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
  17164. "1268 Failed to allocate ndlp for "
  17165. "oxid:x%x SID:x%x\n", oxid, sid);
  17166. return;
  17167. }
  17168. /* Put ndlp onto vport node list */
  17169. lpfc_enqueue_node(vport, ndlp);
  17170. }
  17171. /* Allocate buffer for rsp iocb */
  17172. ctiocb = lpfc_sli_get_iocbq(phba);
  17173. if (!ctiocb)
  17174. return;
  17175. icmd = &ctiocb->wqe;
  17176. /* Extract the F_CTL field from FC_HDR */
  17177. fctl = sli4_fctl_from_fc_hdr(fc_hdr);
  17178. ctiocb->ndlp = lpfc_nlp_get(ndlp);
  17179. if (!ctiocb->ndlp) {
  17180. lpfc_sli_release_iocbq(phba, ctiocb);
  17181. return;
  17182. }
  17183. ctiocb->vport = vport;
  17184. ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
  17185. ctiocb->sli4_lxritag = NO_XRI;
  17186. ctiocb->sli4_xritag = NO_XRI;
  17187. ctiocb->abort_rctl = FC_RCTL_BA_ACC;
  17188. if (fctl & FC_FC_EX_CTX)
  17189. /* Exchange responder sent the abort so we
  17190. * own the oxid.
  17191. */
  17192. xri = oxid;
  17193. else
  17194. xri = rxid;
  17195. lxri = lpfc_sli4_xri_inrange(phba, xri);
  17196. if (lxri != NO_XRI)
  17197. lpfc_set_rrq_active(phba, ndlp, lxri,
  17198. (xri == oxid) ? rxid : oxid, 0);
  17199. /* For BA_ABTS from exchange responder, if the logical xri with
  17200. * the oxid maps to the FCP XRI range, the port no longer has
  17201. * that exchange context, send a BLS_RJT. Override the IOCB for
  17202. * a BA_RJT.
  17203. */
  17204. if ((fctl & FC_FC_EX_CTX) &&
  17205. (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
  17206. ctiocb->abort_rctl = FC_RCTL_BA_RJT;
  17207. bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
  17208. bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
  17209. FC_BA_RJT_INV_XID);
  17210. bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
  17211. FC_BA_RJT_UNABLE);
  17212. }
  17213. /* If BA_ABTS failed to abort a partially assembled receive sequence,
  17214. * the driver no longer has that exchange, send a BLS_RJT. Override
  17215. * the IOCB for a BA_RJT.
  17216. */
  17217. if (aborted == false) {
  17218. ctiocb->abort_rctl = FC_RCTL_BA_RJT;
  17219. bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
  17220. bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
  17221. FC_BA_RJT_INV_XID);
  17222. bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
  17223. FC_BA_RJT_UNABLE);
  17224. }
  17225. if (fctl & FC_FC_EX_CTX) {
  17226. /* ABTS sent by responder to CT exchange, construction
  17227. * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
  17228. * field and RX_ID from ABTS for RX_ID field.
  17229. */
  17230. ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
  17231. bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
  17232. } else {
  17233. /* ABTS sent by initiator to CT exchange, construction
  17234. * of BA_ACC will need to allocate a new XRI as for the
  17235. * XRI_TAG field.
  17236. */
  17237. ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
  17238. }
  17239. /* OX_ID is invariable to who sent ABTS to CT exchange */
  17240. bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
17241. bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
  17242. /* Use CT=VPI */
  17243. bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
  17244. ndlp->nlp_DID);
  17245. bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
  17246. phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
  17247. bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
  17248. /* Xmit CT abts response on exchange <xid> */
  17249. lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
  17250. "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
  17251. ctiocb->abort_rctl, oxid, phba->link_state);
  17252. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
  17253. if (rc == IOCB_ERROR) {
  17254. lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  17255. "2925 Failed to issue CT ABTS RSP x%x on "
  17256. "xri x%x, Data x%x\n",
  17257. ctiocb->abort_rctl, oxid,
  17258. phba->link_state);
  17259. lpfc_nlp_put(ndlp);
  17260. ctiocb->ndlp = NULL;
  17261. lpfc_sli_release_iocbq(phba, ctiocb);
  17262. }
17263. /* If the only usage of this nodelist is the BLS response, release the
17264. * initial ref so the ndlp is freed when the transmit completes
  17265. */
  17266. if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
  17267. !(ndlp->nlp_flag & NLP_DROPPED) &&
  17268. !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
  17269. ndlp->nlp_flag |= NLP_DROPPED;
  17270. lpfc_nlp_put(ndlp);
  17271. }
  17272. }
  17273. /**
  17274. * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
  17275. * @vport: Pointer to the vport on which this sequence was received
  17276. * @dmabuf: pointer to a dmabuf that describes the FC sequence
  17277. *
  17278. * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17279. * receive sequence is only partially assembled by the driver, it shall abort
  17280. * the partially assembled frames for the sequence. Otherwise, if the
  17281. * unsolicited receive sequence has been completely assembled and passed to
17282. * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
17283. * indicate that the unsolicited sequence has been aborted. After that, it
17284. * will issue a basic accept to acknowledge the abort.
  17285. **/
  17286. static void
  17287. lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
  17288. struct hbq_dmabuf *dmabuf)
  17289. {
  17290. struct lpfc_hba *phba = vport->phba;
  17291. struct fc_frame_header fc_hdr;
  17292. uint32_t fctl;
  17293. bool aborted;
  17294. /* Make a copy of fc_hdr before the dmabuf being released */
  17295. memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
  17296. fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
  17297. if (fctl & FC_FC_EX_CTX) {
  17298. /* ABTS by responder to exchange, no cleanup needed */
  17299. aborted = true;
  17300. } else {
  17301. /* ABTS by initiator to exchange, need to do cleanup */
  17302. aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
  17303. if (aborted == false)
  17304. aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
  17305. }
  17306. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  17307. if (phba->nvmet_support) {
  17308. lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
  17309. return;
  17310. }
  17311. /* Respond with BA_ACC or BA_RJT accordingly */
  17312. lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
  17313. }
  17314. /**
  17315. * lpfc_seq_complete - Indicates if a sequence is complete
  17316. * @dmabuf: pointer to a dmabuf that describes the FC sequence
  17317. *
  17318. * This function checks the sequence, starting with the frame described by
  17319. * @dmabuf, to see if all the frames associated with this sequence are present.
  17320. * the frames associated with this sequence are linked to the @dmabuf using the
17321. * dbuf list. This function looks for three major things. 1) That the first frame
17322. * has a sequence count of zero. 2) That there is a frame with the last frame of
17323. * sequence bit set. 3) That there are no holes in the sequence count. The function will
  17324. * return 1 when the sequence is complete, otherwise it will return 0.
  17325. **/
  17326. static int
  17327. lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
  17328. {
  17329. struct fc_frame_header *hdr;
  17330. struct lpfc_dmabuf *d_buf;
  17331. struct hbq_dmabuf *seq_dmabuf;
  17332. uint32_t fctl;
  17333. int seq_count = 0;
  17334. hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17335. /* make sure first frame of sequence has a sequence count of zero */
  17336. if (hdr->fh_seq_cnt != seq_count)
  17337. return 0;
  17338. fctl = (hdr->fh_f_ctl[0] << 16 |
  17339. hdr->fh_f_ctl[1] << 8 |
  17340. hdr->fh_f_ctl[2]);
  17341. /* If last frame of sequence we can return success. */
  17342. if (fctl & FC_FC_END_SEQ)
  17343. return 1;
  17344. list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
  17345. seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  17346. hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
  17347. /* If there is a hole in the sequence count then fail. */
  17348. if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
  17349. return 0;
  17350. fctl = (hdr->fh_f_ctl[0] << 16 |
  17351. hdr->fh_f_ctl[1] << 8 |
  17352. hdr->fh_f_ctl[2]);
  17353. /* If last frame of sequence we can return success. */
  17354. if (fctl & FC_FC_END_SEQ)
  17355. return 1;
  17356. }
  17357. return 0;
  17358. }
  17359. /**
  17360. * lpfc_prep_seq - Prep sequence for ULP processing
  17361. * @vport: Pointer to the vport on which this sequence was received
  17362. * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
  17363. *
  17364. * This function takes a sequence, described by a list of frames, and creates
  17365. * a list of iocbq structures to describe the sequence. This iocbq list will be
  17366. * used to issue to the generic unsolicited sequence handler. This routine
  17367. * returns a pointer to the first iocbq in the list. If the function is unable
17368. * to allocate an iocbq then it throws out the received frames that were not
17369. * able to be described and returns a pointer to the first iocbq. If unable to
  17370. * allocate any iocbqs (including the first) this function will return NULL.
  17371. **/
  17372. static struct lpfc_iocbq *
  17373. lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
  17374. {
  17375. struct hbq_dmabuf *hbq_buf;
  17376. struct lpfc_dmabuf *d_buf, *n_buf;
  17377. struct lpfc_iocbq *first_iocbq, *iocbq;
  17378. struct fc_frame_header *fc_hdr;
  17379. uint32_t sid;
  17380. uint32_t len, tot_len;
  17381. fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
  17382. /* remove from receive buffer list */
  17383. list_del_init(&seq_dmabuf->hbuf.list);
  17384. lpfc_update_rcv_time_stamp(vport);
  17385. /* get the Remote Port's SID */
  17386. sid = sli4_sid_from_fc_hdr(fc_hdr);
  17387. tot_len = 0;
  17388. /* Get an iocbq struct to fill in. */
  17389. first_iocbq = lpfc_sli_get_iocbq(vport->phba);
  17390. if (first_iocbq) {
  17391. /* Initialize the first IOCB. */
  17392. first_iocbq->wcqe_cmpl.total_data_placed = 0;
  17393. bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
  17394. IOSTAT_SUCCESS);
  17395. first_iocbq->vport = vport;
  17396. /* Check FC Header to see what TYPE of frame we are rcv'ing */
  17397. if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
  17398. bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
  17399. sli4_did_from_fc_hdr(fc_hdr));
  17400. }
  17401. bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
  17402. NO_XRI);
  17403. bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
  17404. be16_to_cpu(fc_hdr->fh_ox_id));
  17405. /* put the first buffer into the first iocb */
  17406. tot_len = bf_get(lpfc_rcqe_length,
  17407. &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
  17408. first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
  17409. first_iocbq->bpl_dmabuf = NULL;
  17410. /* Keep track of the BDE count */
  17411. first_iocbq->wcqe_cmpl.word3 = 1;
  17412. if (tot_len > LPFC_DATA_BUF_SIZE)
  17413. first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
  17414. LPFC_DATA_BUF_SIZE;
  17415. else
  17416. first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
  17417. first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
  17418. bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
  17419. sid);
  17420. }
  17421. iocbq = first_iocbq;
  17422. /*
  17423. * Each IOCBq can have two Buffers assigned, so go through the list
  17424. * of buffers for this sequence and save two buffers in each IOCBq
  17425. */
  17426. list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
  17427. if (!iocbq) {
  17428. lpfc_in_buf_free(vport->phba, d_buf);
  17429. continue;
  17430. }
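/* Fill the current iocbq's spare second buffer slot before allocating
 * a new iocbq for additional frames in the sequence.
 */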
  17431. if (!iocbq->bpl_dmabuf) {
  17432. iocbq->bpl_dmabuf = d_buf;
  17433. iocbq->wcqe_cmpl.word3++;
  17434. /* We need to get the size out of the right CQE */
  17435. hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  17436. len = bf_get(lpfc_rcqe_length,
  17437. &hbq_buf->cq_event.cqe.rcqe_cmpl);
  17438. iocbq->unsol_rcv_len = len;
  17439. iocbq->wcqe_cmpl.total_data_placed += len;
  17440. tot_len += len;
  17441. } else {
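/* Both buffer slots are in use: allocate a fresh iocbq for this frame
 * and link it onto the first iocbq's list for the sequence.
 */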
  17442. iocbq = lpfc_sli_get_iocbq(vport->phba);
  17443. if (!iocbq) {
  17444. if (first_iocbq) {
  17445. bf_set(lpfc_wcqe_c_status,
  17446. &first_iocbq->wcqe_cmpl,
  17447. IOSTAT_SUCCESS);
  17448. first_iocbq->wcqe_cmpl.parameter =
  17449. IOERR_NO_RESOURCES;
  17450. }
  17451. lpfc_in_buf_free(vport->phba, d_buf);
  17452. continue;
  17453. }
  17454. /* We need to get the size out of the right CQE */
  17455. hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  17456. len = bf_get(lpfc_rcqe_length,
  17457. &hbq_buf->cq_event.cqe.rcqe_cmpl);
  17458. iocbq->cmd_dmabuf = d_buf;
  17459. iocbq->bpl_dmabuf = NULL;
  17460. iocbq->wcqe_cmpl.word3 = 1;
  17461. if (len > LPFC_DATA_BUF_SIZE)
  17462. iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
  17463. LPFC_DATA_BUF_SIZE;
  17464. else
  17465. iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
  17466. len;
  17467. tot_len += len;
  17468. iocbq->wcqe_cmpl.total_data_placed = tot_len;
  17469. bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
  17470. sid);
  17471. list_add_tail(&iocbq->list, &first_iocbq->list);
  17472. }
  17473. }
  17474. /* Free the sequence's header buffer */
  17475. if (!first_iocbq)
  17476. lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
  17477. return first_iocbq;
  17478. }
  17479. static void
  17480. lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
  17481. struct hbq_dmabuf *seq_dmabuf)
  17482. {
  17483. struct fc_frame_header *fc_hdr;
  17484. struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
  17485. struct lpfc_hba *phba = vport->phba;
  17486. fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
  17487. iocbq = lpfc_prep_seq(vport, seq_dmabuf);
  17488. if (!iocbq) {
  17489. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17490. "2707 Ring %d handler: Failed to allocate "
  17491. "iocb Rctl x%x Type x%x received\n",
  17492. LPFC_ELS_RING,
  17493. fc_hdr->fh_r_ctl, fc_hdr->fh_type);
  17494. return;
  17495. }
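/* Hand the assembled sequence to the unsolicited handler registered for
 * this R_CTL/Type; if no handler claims it, free the buffers.
 */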
  17496. if (!lpfc_complete_unsol_iocb(phba,
  17497. phba->sli4_hba.els_wq->pring,
  17498. iocbq, fc_hdr->fh_r_ctl,
  17499. fc_hdr->fh_type)) {
  17500. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17501. "2540 Ring %d handler: unexpected Rctl "
  17502. "x%x Type x%x received\n",
  17503. LPFC_ELS_RING,
  17504. fc_hdr->fh_r_ctl, fc_hdr->fh_type);
  17505. lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
  17506. }
  17507. /* Free iocb created in lpfc_prep_seq */
  17508. list_for_each_entry_safe(curr_iocb, next_iocb,
  17509. &iocbq->list, list) {
  17510. list_del_init(&curr_iocb->list);
  17511. lpfc_sli_release_iocbq(phba, curr_iocb);
  17512. }
  17513. lpfc_sli_release_iocbq(phba, iocbq);
  17514. }
  17515. static void
  17516. lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  17517. struct lpfc_iocbq *rspiocb)
  17518. {
  17519. struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
  17520. if (pcmd && pcmd->virt)
  17521. dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
  17522. kfree(pcmd);
  17523. lpfc_sli_release_iocbq(phba, cmdiocb);
  17524. lpfc_drain_txq(phba);
  17525. }
  17526. static void
  17527. lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
  17528. struct hbq_dmabuf *dmabuf)
  17529. {
  17530. struct fc_frame_header *fc_hdr;
  17531. struct lpfc_hba *phba = vport->phba;
  17532. struct lpfc_iocbq *iocbq = NULL;
  17533. union lpfc_wqe128 *pwqe;
  17534. struct lpfc_dmabuf *pcmd = NULL;
  17535. uint32_t frame_len;
  17536. int rc;
  17537. unsigned long iflags;
  17538. fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
  17539. frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
  17540. /* Send the received frame back */
  17541. iocbq = lpfc_sli_get_iocbq(phba);
  17542. if (!iocbq) {
  17543. /* Queue cq event and wakeup worker thread to process it */
  17544. spin_lock_irqsave(&phba->hbalock, iflags);
  17545. list_add_tail(&dmabuf->cq_event.list,
  17546. &phba->sli4_hba.sp_queue_event);
  17547. spin_unlock_irqrestore(&phba->hbalock, iflags);
  17548. set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
  17549. lpfc_worker_wake_up(phba);
  17550. return;
  17551. }
  17552. /* Allocate buffer for command payload */
  17553. pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  17554. if (pcmd)
  17555. pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
  17556. &pcmd->phys);
  17557. if (!pcmd || !pcmd->virt)
  17558. goto exit;
  17559. INIT_LIST_HEAD(&pcmd->list);
  17560. /* copyin the payload */
  17561. memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
  17562. iocbq->cmd_dmabuf = pcmd;
  17563. iocbq->vport = vport;
  17564. iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
  17565. iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
  17566. iocbq->num_bdes = 0;
  17567. pwqe = &iocbq->wqe;
  17568. /* fill in BDE's for command */
  17569. pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
  17570. pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
  17571. pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
  17572. pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
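/* Copy the six words of the received FC header into the SEND_FRAME WQE
 * so the frame is echoed back with its original header.
 */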
  17573. pwqe->send_frame.frame_len = frame_len;
  17574. pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
  17575. pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
  17576. pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
  17577. pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
  17578. pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
  17579. pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
  17580. pwqe->generic.wqe_com.word7 = 0;
  17581. pwqe->generic.wqe_com.word10 = 0;
  17582. bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
  17583. bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
  17584. bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
  17585. bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
  17586. bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
  17587. bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
  17588. bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
  17589. bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
  17590. bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
  17591. bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
  17592. bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
  17593. bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
  17594. pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
  17595. iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
  17596. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
  17597. if (rc == IOCB_ERROR)
  17598. goto exit;
  17599. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  17600. return;
  17601. exit:
  17602. lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
  17603. "2023 Unable to process MDS loopback frame\n");
  17604. if (pcmd && pcmd->virt)
  17605. dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
  17606. kfree(pcmd);
  17607. if (iocbq)
  17608. lpfc_sli_release_iocbq(phba, iocbq);
  17609. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  17610. }
  17611. /**
  17612. * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
  17613. * @phba: Pointer to HBA context object.
  17614. * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
  17615. *
17616. * This function is called with no lock held. It processes all
17617. * the received buffers and gives them to upper layers when a received buffer
  17618. * indicates that it is the final frame in the sequence. The interrupt
17619. * service routine processes received buffers in interrupt context.
17620. * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
  17621. * appropriate receive function when the final frame in a sequence is received.
  17622. **/
  17623. void
  17624. lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
  17625. struct hbq_dmabuf *dmabuf)
  17626. {
  17627. struct hbq_dmabuf *seq_dmabuf;
  17628. struct fc_frame_header *fc_hdr;
  17629. struct lpfc_vport *vport;
  17630. uint32_t fcfi;
  17631. uint32_t did;
  17632. /* Process each received buffer */
  17633. fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
  17634. if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
  17635. fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
  17636. vport = phba->pport;
  17637. /* Handle MDS Loopback frames */
  17638. if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
  17639. lpfc_sli4_handle_mds_loopback(vport, dmabuf);
  17640. else
  17641. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  17642. return;
  17643. }
  17644. /* check to see if this a valid type of frame */
  17645. if (lpfc_fc_frame_check(phba, fc_hdr)) {
  17646. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  17647. return;
  17648. }
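/* The FCF index field sits in different bits depending on whether the
 * receive CQE uses the V1 or the original format.
 */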
  17649. if ((bf_get(lpfc_cqe_code,
  17650. &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
  17651. fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
  17652. &dmabuf->cq_event.cqe.rcqe_cmpl);
  17653. else
  17654. fcfi = bf_get(lpfc_rcqe_fcf_id,
  17655. &dmabuf->cq_event.cqe.rcqe_cmpl);
  17656. if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
  17657. vport = phba->pport;
  17658. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  17659. "2023 MDS Loopback %d bytes\n",
  17660. bf_get(lpfc_rcqe_length,
  17661. &dmabuf->cq_event.cqe.rcqe_cmpl));
  17662. /* Handle MDS Loopback frames */
  17663. lpfc_sli4_handle_mds_loopback(vport, dmabuf);
  17664. return;
  17665. }
  17666. /* d_id this frame is directed to */
  17667. did = sli4_did_from_fc_hdr(fc_hdr);
  17668. vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
  17669. if (!vport) {
  17670. /* throw out the frame */
  17671. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  17672. return;
  17673. }
  17674. /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
  17675. if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
  17676. (did != Fabric_DID)) {
  17677. /*
  17678. * Throw out the frame if we are not pt2pt.
  17679. * The pt2pt protocol allows for discovery frames
  17680. * to be received without a registered VPI.
  17681. */
  17682. if (!test_bit(FC_PT2PT, &vport->fc_flag) ||
  17683. phba->link_state == LPFC_HBA_READY) {
  17684. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  17685. return;
  17686. }
  17687. }
  17688. /* Handle the basic abort sequence (BA_ABTS) event */
  17689. if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
  17690. lpfc_sli4_handle_unsol_abort(vport, dmabuf);
  17691. return;
  17692. }
  17693. /* Link this frame */
  17694. seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
  17695. if (!seq_dmabuf) {
  17696. /* unable to add frame to vport - throw it out */
  17697. lpfc_in_buf_free(phba, &dmabuf->dbuf);
  17698. return;
  17699. }
  17700. /* If not last frame in sequence continue processing frames. */
  17701. if (!lpfc_seq_complete(seq_dmabuf))
  17702. return;
  17703. /* Send the complete sequence to the upper layer protocol */
  17704. lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
  17705. }
  17706. /**
  17707. * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
  17708. * @phba: pointer to lpfc hba data structure.
  17709. *
  17710. * This routine is invoked to post rpi header templates to the
  17711. * HBA consistent with the SLI-4 interface spec. This routine
  17712. * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
  17713. * SLI4_PAGE_SIZE modulo 64 rpi context headers.
  17714. *
17715. * This routine does not require any locks. Its usage is expected
  17716. * to be driver load or reset recovery when the driver is
  17717. * sequential.
  17718. *
  17719. * Return codes
  17720. * 0 - successful
  17721. * -EIO - The mailbox failed to complete successfully.
  17722. * When this error occurs, the driver is not guaranteed
  17723. * to have any rpi regions posted to the device and
  17724. * must either attempt to repost the regions or take a
  17725. * fatal error.
  17726. **/
  17727. int
  17728. lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
  17729. {
  17730. struct lpfc_rpi_hdr *rpi_page;
  17731. uint32_t rc = 0;
  17732. uint16_t lrpi = 0;
  17733. /* SLI4 ports that support extents do not require RPI headers. */
  17734. if (!phba->sli4_hba.rpi_hdrs_in_use)
  17735. goto exit;
  17736. if (phba->sli4_hba.extents_in_use)
  17737. return -EIO;
  17738. list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
  17739. /*
  17740. * Assign the rpi headers a physical rpi only if the driver
  17741. * has not initialized those resources. A port reset only
  17742. * needs the headers posted.
  17743. */
  17744. if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
  17745. LPFC_RPI_RSRC_RDY)
  17746. rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
  17747. rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
  17748. if (rc != MBX_SUCCESS) {
  17749. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17750. "2008 Error %d posting all rpi "
  17751. "headers\n", rc);
  17752. rc = -EIO;
  17753. break;
  17754. }
  17755. }
  17756. exit:
  17757. bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
  17758. LPFC_RPI_RSRC_RDY);
  17759. return rc;
  17760. }
  17761. /**
  17762. * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
  17763. * @phba: pointer to lpfc hba data structure.
  17764. * @rpi_page: pointer to the rpi memory region.
  17765. *
  17766. * This routine is invoked to post a single rpi header to the
  17767. * HBA consistent with the SLI-4 interface spec. This memory region
  17768. * maps up to 64 rpi context regions.
  17769. *
  17770. * Return codes
  17771. * 0 - successful
  17772. * -ENOMEM - No available memory
  17773. * -EIO - The mailbox failed to complete successfully.
  17774. **/
  17775. int
  17776. lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
  17777. {
  17778. LPFC_MBOXQ_t *mboxq;
  17779. struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
  17780. uint32_t rc = 0;
  17781. uint32_t shdr_status, shdr_add_status;
  17782. union lpfc_sli4_cfg_shdr *shdr;
  17783. /* SLI4 ports that support extents do not require RPI headers. */
  17784. if (!phba->sli4_hba.rpi_hdrs_in_use)
  17785. return rc;
  17786. if (phba->sli4_hba.extents_in_use)
  17787. return -EIO;
  17788. /* The port is notified of the header region via a mailbox command. */
  17789. mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  17790. if (!mboxq) {
  17791. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17792. "2001 Unable to allocate memory for issuing "
  17793. "SLI_CONFIG_SPECIAL mailbox command\n");
  17794. return -ENOMEM;
  17795. }
  17796. /* Post all rpi memory regions to the port. */
  17797. hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
  17798. lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
  17799. LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
  17800. sizeof(struct lpfc_mbx_post_hdr_tmpl) -
  17801. sizeof(struct lpfc_sli4_cfg_mhdr),
  17802. LPFC_SLI4_MBX_EMBED);
  17803. /* Post the physical rpi to the port for this rpi header. */
  17804. bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
  17805. rpi_page->start_rpi);
  17806. bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
  17807. hdr_tmpl, rpi_page->page_count);
  17808. hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
  17809. hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
  17810. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  17811. shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
  17812. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  17813. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  17814. mempool_free(mboxq, phba->mbox_mem_pool);
  17815. if (shdr_status || shdr_add_status || rc) {
  17816. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17817. "2514 POST_RPI_HDR mailbox failed with "
  17818. "status x%x add_status x%x, mbx status x%x\n",
  17819. shdr_status, shdr_add_status, rc);
  17820. rc = -ENXIO;
  17821. } else {
  17822. /*
17823. * The next_rpi stores the next logical modulo-64 rpi value used
  17824. * to post physical rpis in subsequent rpi postings.
  17825. */
  17826. spin_lock_irq(&phba->hbalock);
  17827. phba->sli4_hba.next_rpi = rpi_page->next_rpi;
  17828. spin_unlock_irq(&phba->hbalock);
  17829. }
  17830. return rc;
  17831. }
  17832. /**
  17833. * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
  17834. * @phba: pointer to lpfc hba data structure.
  17835. *
17836. * This routine is invoked to allocate the next available rpi from the
17837. * driver's rpi bitmask, consistent with the SLI-4 interface spec. When
17838. * the pool of posted rpis runs low, it also posts another rpi header
17839. * region to the port.
  17840. *
  17841. * Returns
  17842. * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
  17843. * LPFC_RPI_ALLOC_ERROR if no rpis are available.
  17844. **/
  17845. int
  17846. lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
  17847. {
  17848. unsigned long rpi;
  17849. uint16_t max_rpi, rpi_limit;
  17850. uint16_t rpi_remaining, lrpi = 0;
  17851. struct lpfc_rpi_hdr *rpi_hdr;
  17852. unsigned long iflag;
  17853. /*
  17854. * Fetch the next logical rpi. Because this index is logical,
  17855. * the driver starts at 0 each time.
  17856. */
  17857. spin_lock_irqsave(&phba->hbalock, iflag);
  17858. max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
  17859. rpi_limit = phba->sli4_hba.next_rpi;
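/* Search the rpi bitmask for a free rpi, bounded by next_rpi (the
 * highest rpi for which a header region has been posted).
 */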
  17860. rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
  17861. if (rpi >= rpi_limit)
  17862. rpi = LPFC_RPI_ALLOC_ERROR;
  17863. else {
  17864. set_bit(rpi, phba->sli4_hba.rpi_bmask);
  17865. phba->sli4_hba.max_cfg_param.rpi_used++;
  17866. phba->sli4_hba.rpi_count++;
  17867. }
  17868. lpfc_printf_log(phba, KERN_INFO,
  17869. LOG_NODE | LOG_DISCOVERY,
  17870. "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
  17871. (int) rpi, max_rpi, rpi_limit);
  17872. /*
  17873. * Don't try to allocate more rpi header regions if the device limit
  17874. * has been exhausted.
  17875. */
  17876. if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
  17877. (phba->sli4_hba.rpi_count >= max_rpi)) {
  17878. spin_unlock_irqrestore(&phba->hbalock, iflag);
  17879. return rpi;
  17880. }
  17881. /*
  17882. * RPI header postings are not required for SLI4 ports capable of
  17883. * extents.
  17884. */
  17885. if (!phba->sli4_hba.rpi_hdrs_in_use) {
  17886. spin_unlock_irqrestore(&phba->hbalock, iflag);
  17887. return rpi;
  17888. }
  17889. /*
  17890. * If the driver is running low on rpi resources, allocate another
  17891. * page now. Note that the next_rpi value is used because
17892. * it represents how many are actually in use whereas max_rpi notes
17893. * the maximum number supported by the device.
  17894. */
  17895. rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
  17896. spin_unlock_irqrestore(&phba->hbalock, iflag);
  17897. if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
  17898. rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
  17899. if (!rpi_hdr) {
  17900. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17901. "2002 Error Could not grow rpi "
  17902. "count\n");
  17903. } else {
  17904. lrpi = rpi_hdr->start_rpi;
  17905. rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
  17906. lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
  17907. }
  17908. }
  17909. return rpi;
  17910. }
  17911. /**
  17912. * __lpfc_sli4_free_rpi - Release an rpi for reuse.
  17913. * @phba: pointer to lpfc hba data structure.
  17914. * @rpi: rpi to free
  17915. *
  17916. * This routine is invoked to release an rpi to the pool of
  17917. * available rpis maintained by the driver.
  17918. **/
  17919. static void
  17920. __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
  17921. {
  17922. /*
  17923. * if the rpi value indicates a prior unreg has already
  17924. * been done, skip the unreg.
  17925. */
  17926. if (rpi == LPFC_RPI_ALLOC_ERROR)
  17927. return;
  17928. if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
  17929. phba->sli4_hba.rpi_count--;
  17930. phba->sli4_hba.max_cfg_param.rpi_used--;
  17931. } else {
  17932. lpfc_printf_log(phba, KERN_INFO,
  17933. LOG_NODE | LOG_DISCOVERY,
  17934. "2016 rpi %x not inuse\n",
  17935. rpi);
  17936. }
  17937. }
  17938. /**
  17939. * lpfc_sli4_free_rpi - Release an rpi for reuse.
  17940. * @phba: pointer to lpfc hba data structure.
  17941. * @rpi: rpi to free
  17942. *
  17943. * This routine is invoked to release an rpi to the pool of
  17944. * available rpis maintained by the driver.
  17945. **/
  17946. void
  17947. lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
  17948. {
  17949. spin_lock_irq(&phba->hbalock);
  17950. __lpfc_sli4_free_rpi(phba, rpi);
  17951. spin_unlock_irq(&phba->hbalock);
  17952. }
  17953. /**
  17954. * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
  17955. * @phba: pointer to lpfc hba data structure.
  17956. *
  17957. * This routine is invoked to remove the memory region that
  17958. * provided rpi via a bitmask.
  17959. **/
  17960. void
  17961. lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
  17962. {
  17963. kfree(phba->sli4_hba.rpi_bmask);
  17964. kfree(phba->sli4_hba.rpi_ids);
  17965. bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
  17966. }
  17967. /**
17968. * lpfc_sli4_resume_rpi - Resume an rpi with the port
  17969. * @ndlp: pointer to lpfc nodelist data structure.
  17970. * @cmpl: completion call-back.
  17971. * @iocbq: data to load as mbox ctx_u information
  17972. *
17973. * This routine is invoked to issue a RESUME_RPI mailbox command to resume
17974. * the rpi associated with @ndlp on the port.
  17975. **/
  17976. int
  17977. lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
  17978. void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
  17979. struct lpfc_iocbq *iocbq)
  17980. {
  17981. LPFC_MBOXQ_t *mboxq;
  17982. struct lpfc_hba *phba = ndlp->phba;
  17983. int rc;
  17984. /* The port is notified of the header region via a mailbox command. */
  17985. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  17986. if (!mboxq)
  17987. return -ENOMEM;
  17988. /* If cmpl assigned, then this nlp_get pairs with
  17989. * lpfc_mbx_cmpl_resume_rpi.
  17990. *
  17991. * Else cmpl is NULL, then this nlp_get pairs with
  17992. * lpfc_sli_def_mbox_cmpl.
  17993. */
  17994. if (!lpfc_nlp_get(ndlp)) {
  17995. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  17996. "2122 %s: Failed to get nlp ref\n",
  17997. __func__);
  17998. mempool_free(mboxq, phba->mbox_mem_pool);
  17999. return -EIO;
  18000. }
  18001. /* Post all rpi memory regions to the port. */
  18002. lpfc_resume_rpi(mboxq, ndlp);
  18003. if (cmpl) {
  18004. mboxq->mbox_cmpl = cmpl;
  18005. mboxq->ctx_u.save_iocb = iocbq;
  18006. } else
  18007. mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  18008. mboxq->ctx_ndlp = ndlp;
  18009. mboxq->vport = ndlp->vport;
  18010. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  18011. if (rc == MBX_NOT_FINISHED) {
  18012. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18013. "2010 Resume RPI Mailbox failed "
  18014. "status %d, mbxStatus x%x\n", rc,
  18015. bf_get(lpfc_mqe_status, &mboxq->u.mqe));
  18016. lpfc_nlp_put(ndlp);
  18017. mempool_free(mboxq, phba->mbox_mem_pool);
  18018. return -EIO;
  18019. }
  18020. return 0;
  18021. }
  18022. /**
  18023. * lpfc_sli4_init_vpi - Initialize a vpi with the port
  18024. * @vport: Pointer to the vport for which the vpi is being initialized
  18025. *
  18026. * This routine is invoked to activate a vpi with the port.
  18027. *
  18028. * Returns:
  18029. * 0 success
  18030. * -Evalue otherwise
  18031. **/
  18032. int
  18033. lpfc_sli4_init_vpi(struct lpfc_vport *vport)
  18034. {
  18035. LPFC_MBOXQ_t *mboxq;
  18036. int rc = 0;
  18037. int retval = MBX_SUCCESS;
  18038. uint32_t mbox_tmo;
  18039. struct lpfc_hba *phba = vport->phba;
  18040. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18041. if (!mboxq)
  18042. return -ENOMEM;
  18043. lpfc_init_vpi(phba, mboxq, vport->vpi);
  18044. mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
  18045. rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
  18046. if (rc != MBX_SUCCESS) {
  18047. lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  18048. "2022 INIT VPI Mailbox failed "
  18049. "status %d, mbxStatus x%x\n", rc,
  18050. bf_get(lpfc_mqe_status, &mboxq->u.mqe));
  18051. retval = -EIO;
  18052. }
  18053. if (rc != MBX_TIMEOUT)
  18054. mempool_free(mboxq, vport->phba->mbox_mem_pool);
  18055. return retval;
  18056. }
  18057. /**
  18058. * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
  18059. * @phba: pointer to lpfc hba data structure.
  18060. * @mboxq: Pointer to mailbox object.
  18061. *
18062. * This routine is the completion handler for the nonembedded ADD_FCF_RECORD
18063. * mailbox command. It checks the mailbox subheader status and then frees
18064. * the nonembedded mailbox resources.
  18065. **/
  18066. static void
  18067. lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
  18068. {
  18069. void *virt_addr;
  18070. union lpfc_sli4_cfg_shdr *shdr;
  18071. uint32_t shdr_status, shdr_add_status;
  18072. virt_addr = mboxq->sge_array->addr[0];
  18073. /* The IOCTL status is embedded in the mailbox subheader. */
  18074. shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
  18075. shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
  18076. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
  18077. if ((shdr_status || shdr_add_status) &&
  18078. (shdr_status != STATUS_FCF_IN_USE))
  18079. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18080. "2558 ADD_FCF_RECORD mailbox failed with "
  18081. "status x%x add_status x%x\n",
  18082. shdr_status, shdr_add_status);
  18083. lpfc_sli4_mbox_cmd_free(phba, mboxq);
  18084. }
  18085. /**
  18086. * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
  18087. * @phba: pointer to lpfc hba data structure.
  18088. * @fcf_record: pointer to the initialized fcf record to add.
  18089. *
  18090. * This routine is invoked to manually add a single FCF record. The caller
  18091. * must pass a completely initialized FCF_Record. This routine takes
  18092. * care of the nonembedded mailbox operations.
  18093. **/
  18094. int
  18095. lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
  18096. {
  18097. int rc = 0;
  18098. LPFC_MBOXQ_t *mboxq;
  18099. uint8_t *bytep;
  18100. void *virt_addr;
  18101. struct lpfc_mbx_sge sge;
  18102. uint32_t alloc_len, req_len;
  18103. uint32_t fcfindex;
  18104. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18105. if (!mboxq) {
  18106. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18107. "2009 Failed to allocate mbox for ADD_FCF cmd\n");
  18108. return -ENOMEM;
  18109. }
  18110. req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
  18111. sizeof(uint32_t);
  18112. /* Allocate DMA memory and set up the non-embedded mailbox command */
  18113. alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
  18114. LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
  18115. req_len, LPFC_SLI4_MBX_NEMBED);
  18116. if (alloc_len < req_len) {
  18117. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18118. "2523 Allocated DMA memory size (x%x) is "
  18119. "less than the requested DMA memory "
  18120. "size (x%x)\n", alloc_len, req_len);
  18121. lpfc_sli4_mbox_cmd_free(phba, mboxq);
  18122. return -ENOMEM;
  18123. }
  18124. /*
  18125. * Get the first SGE entry from the non-embedded DMA memory. This
  18126. * routine only uses a single SGE.
  18127. */
  18128. lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
  18129. virt_addr = mboxq->sge_array->addr[0];
  18130. /*
  18131. * Configure the FCF record for FCFI 0. This is the driver's
  18132. * hardcoded default and gets used in nonFIP mode.
  18133. */
  18134. fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
  18135. bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
  18136. lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
  18137. /*
  18138. * Copy the fcf_index and the FCF Record Data. The data starts after
  18139. * the FCoE header plus word10. The data copy needs to be endian
  18140. * correct.
  18141. */
  18142. bytep += sizeof(uint32_t);
  18143. lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
  18144. mboxq->vport = phba->pport;
  18145. mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
  18146. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  18147. if (rc == MBX_NOT_FINISHED) {
  18148. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18149. "2515 ADD_FCF_RECORD mailbox failed with "
  18150. "status 0x%x\n", rc);
  18151. lpfc_sli4_mbox_cmd_free(phba, mboxq);
  18152. rc = -EIO;
  18153. } else
  18154. rc = 0;
  18155. return rc;
  18156. }
  18157. /**
  18158. * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
  18159. * @phba: pointer to lpfc hba data structure.
  18160. * @fcf_record: pointer to the fcf record to write the default data.
  18161. * @fcf_index: FCF table entry index.
  18162. *
  18163. * This routine is invoked to build the driver's default FCF record. The
  18164. * values used are hardcoded. This routine handles memory initialization.
  18165. *
  18166. **/
  18167. void
  18168. lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
  18169. struct fcf_record *fcf_record,
  18170. uint16_t fcf_index)
  18171. {
  18172. memset(fcf_record, 0, sizeof(struct fcf_record));
  18173. fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
  18174. fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
  18175. fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
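/* Default FCF MAC address: the FC-MAP bytes followed by the driver's
 * fixed low-order MAC bytes.
 */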
  18176. bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
  18177. bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
  18178. bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
  18179. bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
  18180. bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
  18181. bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
  18182. bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
  18183. bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
  18184. bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
  18185. bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
  18186. bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
  18187. bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
  18188. bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
  18189. LPFC_FCF_FPMA | LPFC_FCF_SPMA);
  18190. /* Set the VLAN bit map */
  18191. if (phba->valid_vlan) {
  18192. fcf_record->vlan_bitmap[phba->vlan_id / 8]
  18193. = 1 << (phba->vlan_id % 8);
  18194. }
  18195. }
  18196. /**
  18197. * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
  18198. * @phba: pointer to lpfc hba data structure.
  18199. * @fcf_index: FCF table entry offset.
  18200. *
  18201. * This routine is invoked to scan the entire FCF table by reading FCF
  18202. * record and processing it one at a time starting from the @fcf_index
  18203. * for initial FCF discovery or fast FCF failover rediscovery.
  18204. *
18205. * Return 0 if the mailbox command is submitted successfully, non-zero
  18206. * otherwise.
  18207. **/
  18208. int
  18209. lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
  18210. {
  18211. int rc = 0, error;
  18212. LPFC_MBOXQ_t *mboxq;
  18213. phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
  18214. phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
  18215. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18216. if (!mboxq) {
  18217. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18218. "2000 Failed to allocate mbox for "
  18219. "READ_FCF cmd\n");
  18220. error = -ENOMEM;
  18221. goto fail_fcf_scan;
  18222. }
  18223. /* Construct the read FCF record mailbox command */
  18224. rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
  18225. if (rc) {
  18226. error = -EINVAL;
  18227. goto fail_fcf_scan;
  18228. }
  18229. /* Issue the mailbox command asynchronously */
  18230. mboxq->vport = phba->pport;
  18231. mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
  18232. set_bit(FCF_TS_INPROG, &phba->hba_flag);
  18233. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  18234. if (rc == MBX_NOT_FINISHED)
  18235. error = -EIO;
  18236. else {
  18237. /* Reset eligible FCF count for new scan */
  18238. if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
  18239. phba->fcf.eligible_fcf_cnt = 0;
  18240. error = 0;
  18241. }
  18242. fail_fcf_scan:
  18243. if (error) {
  18244. if (mboxq)
  18245. lpfc_sli4_mbox_cmd_free(phba, mboxq);
  18246. /* FCF scan failed, clear FCF_TS_INPROG flag */
  18247. clear_bit(FCF_TS_INPROG, &phba->hba_flag);
  18248. }
  18249. return error;
  18250. }
  18251. /**
  18252. * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
  18253. * @phba: pointer to lpfc hba data structure.
  18254. * @fcf_index: FCF table entry offset.
  18255. *
  18256. * This routine is invoked to read an FCF record indicated by @fcf_index
  18257. * and to use it for FLOGI roundrobin FCF failover.
  18258. *
18259. * Return 0 if the mailbox command is submitted successfully, non-zero
  18260. * otherwise.
  18261. **/
  18262. int
  18263. lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
  18264. {
  18265. int rc = 0, error;
  18266. LPFC_MBOXQ_t *mboxq;
  18267. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18268. if (!mboxq) {
  18269. lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
  18270. "2763 Failed to allocate mbox for "
  18271. "READ_FCF cmd\n");
  18272. error = -ENOMEM;
  18273. goto fail_fcf_read;
  18274. }
  18275. /* Construct the read FCF record mailbox command */
  18276. rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
  18277. if (rc) {
  18278. error = -EINVAL;
  18279. goto fail_fcf_read;
  18280. }
  18281. /* Issue the mailbox command asynchronously */
  18282. mboxq->vport = phba->pport;
  18283. mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
  18284. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  18285. if (rc == MBX_NOT_FINISHED)
  18286. error = -EIO;
  18287. else
  18288. error = 0;
  18289. fail_fcf_read:
  18290. if (error && mboxq)
  18291. lpfc_sli4_mbox_cmd_free(phba, mboxq);
  18292. return error;
  18293. }
  18294. /**
  18295. * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
  18296. * @phba: pointer to lpfc hba data structure.
  18297. * @fcf_index: FCF table entry offset.
  18298. *
  18299. * This routine is invoked to read an FCF record indicated by @fcf_index to
  18300. * determine whether it's eligible for FLOGI roundrobin failover list.
  18301. *
18302. * Return 0 if the mailbox command is submitted successfully, non-zero
  18303. * otherwise.
  18304. **/
  18305. int
  18306. lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
  18307. {
  18308. int rc = 0, error;
  18309. LPFC_MBOXQ_t *mboxq;
  18310. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18311. if (!mboxq) {
  18312. lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
  18313. "2758 Failed to allocate mbox for "
  18314. "READ_FCF cmd\n");
  18315. error = -ENOMEM;
  18316. goto fail_fcf_read;
  18317. }
  18318. /* Construct the read FCF record mailbox command */
  18319. rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
  18320. if (rc) {
  18321. error = -EINVAL;
  18322. goto fail_fcf_read;
  18323. }
  18324. /* Issue the mailbox command asynchronously */
  18325. mboxq->vport = phba->pport;
  18326. mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
  18327. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
  18328. if (rc == MBX_NOT_FINISHED)
  18329. error = -EIO;
  18330. else
  18331. error = 0;
  18332. fail_fcf_read:
  18333. if (error && mboxq)
  18334. lpfc_sli4_mbox_cmd_free(phba, mboxq);
  18335. return error;
  18336. }
  18337. /**
  18338. * lpfc_check_next_fcf_pri_level
  18339. * @phba: pointer to the lpfc_hba struct for this port.
  18340. * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18341. * routine when the rr_bmask is empty. The FCF indices are put into the
18342. * rr_bmask based on their priority level, starting from the highest priority
  18343. * to the lowest. The most likely FCF candidate will be in the highest
  18344. * priority group. When this routine is called it searches the fcf_pri list for
  18345. * next lowest priority group and repopulates the rr_bmask with only those
  18346. * fcf_indexes.
  18347. * returns:
  18348. * 1=success 0=failure
  18349. **/
  18350. static int
  18351. lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
  18352. {
  18353. uint16_t next_fcf_pri;
  18354. uint16_t last_index;
  18355. struct lpfc_fcf_pri *fcf_pri;
  18356. int rc;
  18357. int ret = 0;
  18358. last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
  18359. LPFC_SLI4_FCF_TBL_INDX_MAX);
  18360. lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  18361. "3060 Last IDX %d\n", last_index);
  18362. /* Verify the priority list has 2 or more entries */
  18363. spin_lock_irq(&phba->hbalock);
  18364. if (list_empty(&phba->fcf.fcf_pri_list) ||
  18365. list_is_singular(&phba->fcf.fcf_pri_list)) {
  18366. spin_unlock_irq(&phba->hbalock);
  18367. lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
  18368. "3061 Last IDX %d\n", last_index);
  18369. return 0; /* Empty rr list */
  18370. }
  18371. spin_unlock_irq(&phba->hbalock);
  18372. next_fcf_pri = 0;
  18373. /*
  18374. * Clear the rr_bmask and set all of the bits that are at this
  18375. * priority.
  18376. */
  18377. memset(phba->fcf.fcf_rr_bmask, 0,
  18378. sizeof(*phba->fcf.fcf_rr_bmask));
  18379. spin_lock_irq(&phba->hbalock);
  18380. list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
  18381. if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
  18382. continue;
  18383. /*
18384. * The first priority level that has not had a FLOGI failure
18385. * will be the highest.
  18386. */
  18387. if (!next_fcf_pri)
  18388. next_fcf_pri = fcf_pri->fcf_rec.priority;
  18389. spin_unlock_irq(&phba->hbalock);
  18390. if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
  18391. rc = lpfc_sli4_fcf_rr_index_set(phba,
  18392. fcf_pri->fcf_rec.fcf_index);
  18393. if (rc)
  18394. return 0;
  18395. }
  18396. spin_lock_irq(&phba->hbalock);
  18397. }
  18398. /*
18399. * If next_fcf_pri was not set above and the list is not empty, then
18400. * FLOGI has failed on all of them. So reset the FLOGI failed flag
18401. * and start at the beginning.
  18402. */
  18403. if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
  18404. list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
  18405. fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
  18406. /*
18407. * The first priority level that has not had a FLOGI failure
18408. * will be the highest.
  18409. */
  18410. if (!next_fcf_pri)
  18411. next_fcf_pri = fcf_pri->fcf_rec.priority;
  18412. spin_unlock_irq(&phba->hbalock);
  18413. if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
  18414. rc = lpfc_sli4_fcf_rr_index_set(phba,
  18415. fcf_pri->fcf_rec.fcf_index);
  18416. if (rc)
  18417. return 0;
  18418. }
  18419. spin_lock_irq(&phba->hbalock);
  18420. }
  18421. } else
  18422. ret = 1;
  18423. spin_unlock_irq(&phba->hbalock);
  18424. return ret;
  18425. }
  18426. /**
  18427. * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
  18428. * @phba: pointer to lpfc hba data structure.
  18429. *
  18430. * This routine is to get the next eligible FCF record index in a round
  18431. * robin fashion. If the next eligible FCF record index equals to the
  18432. * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
  18433. * shall be returned, otherwise, the next eligible FCF record's index
  18434. * shall be returned.
  18435. **/
  18436. uint16_t
  18437. lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
  18438. {
  18439. uint16_t next_fcf_index;
  18440. initial_priority:
  18441. /* Search start from next bit of currently registered FCF index */
  18442. next_fcf_index = phba->fcf.current_rec.fcf_indx;
  18443. next_priority:
  18444. /* Determine the next fcf index to check */
  18445. next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
  18446. next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
  18447. LPFC_SLI4_FCF_TBL_INDX_MAX,
  18448. next_fcf_index);
  18449. /* Wrap around condition on phba->fcf.fcf_rr_bmask */
  18450. if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
  18451. /*
  18452. * If we have wrapped then we need to clear the bits that
  18453. * have been tested so that we can detect when we should
  18454. * change the priority level.
  18455. */
  18456. next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
  18457. LPFC_SLI4_FCF_TBL_INDX_MAX);
  18458. }
  18459. /* Check roundrobin failover list empty condition */
  18460. if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
  18461. next_fcf_index == phba->fcf.current_rec.fcf_indx) {
  18462. /*
18463. * If the next fcf index is not found, check if there are lower
18464. * priority level FCFs in the fcf_priority list.
18465. * Set up the rr_bmask with all of the available fcf bits
18466. * at that level and continue the selection process.
  18467. */
  18468. if (lpfc_check_next_fcf_pri_level(phba))
  18469. goto initial_priority;
  18470. lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
  18471. "2844 No roundrobin failover FCF available\n");
  18472. return LPFC_FCOE_FCF_NEXT_NONE;
  18473. }
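/* Skip any FCF entry whose FLOGI has already failed; if it is the only
 * entry left on the priority list, report that no failover FCF is available.
 */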
  18474. if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
  18475. phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
  18476. LPFC_FCF_FLOGI_FAILED) {
  18477. if (list_is_singular(&phba->fcf.fcf_pri_list))
  18478. return LPFC_FCOE_FCF_NEXT_NONE;
  18479. goto next_priority;
  18480. }
  18481. lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  18482. "2845 Get next roundrobin failover FCF (x%x)\n",
  18483. next_fcf_index);
  18484. return next_fcf_index;
  18485. }
  18486. /**
  18487. * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
  18488. * @phba: pointer to lpfc hba data structure.
  18489. * @fcf_index: index into the FCF table to 'set'
  18490. *
  18491. * This routine sets the FCF record index in to the eligible bmask for
  18492. * roundrobin failover search. It checks to make sure that the index
  18493. * does not go beyond the range of the driver allocated bmask dimension
  18494. * before setting the bit.
  18495. *
  18496. * Returns 0 if the index bit successfully set, otherwise, it returns
  18497. * -EINVAL.
  18498. **/
  18499. int
  18500. lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
  18501. {
  18502. if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
  18503. lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
  18504. "2610 FCF (x%x) reached driver's book "
  18505. "keeping dimension:x%x\n",
  18506. fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
  18507. return -EINVAL;
  18508. }
  18509. /* Set the eligible FCF record index bmask */
  18510. set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
  18511. lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  18512. "2790 Set FCF (x%x) to roundrobin FCF failover "
  18513. "bmask\n", fcf_index);
  18514. return 0;
  18515. }
  18516. /**
  18517. * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
  18518. * @phba: pointer to lpfc hba data structure.
  18519. * @fcf_index: index into the FCF table to 'clear'
  18520. *
  18521. * This routine clears the FCF record index from the eligible bmask for
  18522. * roundrobin failover search. It checks to make sure that the index
  18523. * does not go beyond the range of the driver allocated bmask dimension
  18524. * before clearing the bit.
  18525. **/
  18526. void
  18527. lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
  18528. {
  18529. struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
  18530. if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
  18531. lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
  18532. "2762 FCF (x%x) reached driver's book "
  18533. "keeping dimension:x%x\n",
  18534. fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
  18535. return;
  18536. }
  18537. /* Clear the eligible FCF record index bmask */
  18538. spin_lock_irq(&phba->hbalock);
  18539. list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
  18540. list) {
  18541. if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
  18542. list_del_init(&fcf_pri->list);
  18543. break;
  18544. }
  18545. }
  18546. spin_unlock_irq(&phba->hbalock);
  18547. clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
  18548. lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  18549. "2791 Clear FCF (x%x) from roundrobin failover "
  18550. "bmask\n", fcf_index);
  18551. }
  18552. /**
  18553. * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
  18554. * @phba: pointer to lpfc hba data structure.
  18555. * @mbox: An allocated pointer to type LPFC_MBOXQ_t
  18556. *
  18557. * This routine is the completion routine for the rediscover FCF table mailbox
  18558. * command. If the mailbox command returned failure, it will try to stop the
  18559. * FCF rediscover wait timer.
  18560. **/
  18561. static void
  18562. lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
  18563. {
  18564. struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
  18565. uint32_t shdr_status, shdr_add_status;
  18566. redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
  18567. shdr_status = bf_get(lpfc_mbox_hdr_status,
  18568. &redisc_fcf->header.cfg_shdr.response);
  18569. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
  18570. &redisc_fcf->header.cfg_shdr.response);
  18571. if (shdr_status || shdr_add_status) {
  18572. lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
  18573. "2746 Requesting for FCF rediscovery failed "
  18574. "status x%x add_status x%x\n",
  18575. shdr_status, shdr_add_status);
  18576. if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
  18577. spin_lock_irq(&phba->hbalock);
  18578. phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
  18579. spin_unlock_irq(&phba->hbalock);
  18580. /*
  18581. * CVL event triggered FCF rediscover request failed,
  18582. * last resort to re-try current registered FCF entry.
  18583. */
  18584. lpfc_retry_pport_discovery(phba);
  18585. } else {
  18586. spin_lock_irq(&phba->hbalock);
  18587. phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
  18588. spin_unlock_irq(&phba->hbalock);
  18589. /*
  18590. * DEAD FCF event triggered FCF rediscover request
  18591. * failed, last resort to fail over as a link down
  18592. * to FCF registration.
  18593. */
  18594. lpfc_sli4_fcf_dead_failthrough(phba);
  18595. }
  18596. } else {
  18597. lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
  18598. "2775 Start FCF rediscover quiescent timer\n");
  18599. /*
  18600. * Start FCF rediscovery wait timer for pending FCF
18601. * before rescanning the FCF record table.
  18602. */
  18603. lpfc_fcf_redisc_wait_start_timer(phba);
  18604. }
  18605. mempool_free(mbox, phba->mbox_mem_pool);
  18606. }
  18607. /**
  18608. * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
  18609. * @phba: pointer to lpfc hba data structure.
  18610. *
  18611. * This routine is invoked to request for rediscovery of the entire FCF table
  18612. * by the port.
  18613. **/
  18614. int
  18615. lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
  18616. {
  18617. LPFC_MBOXQ_t *mbox;
  18618. struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
  18619. int rc, length;
  18620. /* Cancel retry delay timers to all vports before FCF rediscover */
  18621. lpfc_cancel_all_vport_retry_delay_timer(phba);
  18622. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18623. if (!mbox) {
  18624. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18625. "2745 Failed to allocate mbox for "
  18626. "requesting FCF rediscover.\n");
  18627. return -ENOMEM;
  18628. }
  18629. length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
  18630. sizeof(struct lpfc_sli4_cfg_mhdr));
  18631. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
  18632. LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
  18633. length, LPFC_SLI4_MBX_EMBED);
  18634. redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
  18635. /* Set count to 0 for invalidating the entire FCF database */
  18636. bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
  18637. /* Issue the mailbox command asynchronously */
  18638. mbox->vport = phba->pport;
  18639. mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
  18640. rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
  18641. if (rc == MBX_NOT_FINISHED) {
  18642. mempool_free(mbox, phba->mbox_mem_pool);
  18643. return -EIO;
  18644. }
  18645. return 0;
  18646. }
  18647. /**
  18648. * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
  18649. * @phba: pointer to lpfc hba data structure.
  18650. *
18651. * This function is the failover routine of last resort for the FCF DEAD
18652. * event, used when the driver fails to perform a fast FCF failover.
  18653. **/
  18654. void
  18655. lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
  18656. {
  18657. uint32_t link_state;
  18658. /*
  18659. * Last resort as FCF DEAD event failover will treat this as
  18660. * a link down, but save the link state because we don't want
  18661. * it to be changed to Link Down unless it is already down.
  18662. */
  18663. link_state = phba->link_state;
  18664. lpfc_linkdown(phba);
  18665. phba->link_state = link_state;
  18666. /* Unregister FCF if no devices connected to it */
  18667. lpfc_unregister_unused_fcf(phba);
  18668. }
  18669. /**
  18670. * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
  18671. * @phba: pointer to lpfc hba data structure.
18672. * @rgn23_data: pointer to the buffer that receives region 23 data.
18673. *
18674. * This function gets SLI3 port configuration region 23 data through the memory
18675. * dump mailbox command. When it successfully retrieves data, the size of the
18676. * data is returned; otherwise 0 is returned.
  18677. **/
  18678. static uint32_t
  18679. lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
  18680. {
  18681. LPFC_MBOXQ_t *pmb = NULL;
  18682. MAILBOX_t *mb;
  18683. uint32_t offset = 0;
  18684. int rc;
  18685. if (!rgn23_data)
  18686. return 0;
  18687. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18688. if (!pmb) {
  18689. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18690. "2600 failed to allocate mailbox memory\n");
  18691. return 0;
  18692. }
  18693. mb = &pmb->u.mb;
  18694. do {
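/*
 * Read region 23 in chunks: each pass issues a dump-memory mailbox
 * command for the next piece at 'offset' and copies the returned data
 * into rgn23_data until the region is exhausted or the DMP_RGN23_SIZE
 * buffer is full.
 */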
  18695. lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
  18696. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  18697. if (rc != MBX_SUCCESS) {
  18698. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  18699. "2601 failed to read config "
  18700. "region 23, rc 0x%x Status 0x%x\n",
  18701. rc, mb->mbxStatus);
  18702. mb->un.varDmp.word_cnt = 0;
  18703. }
  18704. /*
  18705. * dump mem may return a zero when finished or we got a
  18706. * mailbox error, either way we are done.
  18707. */
  18708. if (mb->un.varDmp.word_cnt == 0)
  18709. break;
  18710. if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
  18711. mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
  18712. lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
  18713. rgn23_data + offset,
  18714. mb->un.varDmp.word_cnt);
  18715. offset += mb->un.varDmp.word_cnt;
  18716. } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
  18717. mempool_free(pmb, phba->mbox_mem_pool);
  18718. return offset;
  18719. }
  18720. /**
  18721. * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
  18722. * @phba: pointer to lpfc hba data structure.
18723. * @rgn23_data: pointer to the buffer that receives region 23 data.
18724. *
18725. * This function gets SLI4 port configuration region 23 data through the memory
18726. * dump mailbox command. When it successfully retrieves data, the size of the
18727. * data is returned; otherwise 0 is returned.
  18728. **/
  18729. static uint32_t
  18730. lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
  18731. {
  18732. LPFC_MBOXQ_t *mboxq = NULL;
  18733. struct lpfc_dmabuf *mp = NULL;
  18734. struct lpfc_mqe *mqe;
  18735. uint32_t data_length = 0;
  18736. int rc;
  18737. if (!rgn23_data)
  18738. return 0;
  18739. mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18740. if (!mboxq) {
  18741. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18742. "3105 failed to allocate mailbox memory\n");
  18743. return 0;
  18744. }
  18745. if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
  18746. goto out;
  18747. mqe = &mboxq->u.mqe;
  18748. mp = mboxq->ctx_buf;
  18749. rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
  18750. if (rc)
  18751. goto out;
  18752. data_length = mqe->un.mb_words[5];
  18753. if (data_length == 0)
  18754. goto out;
  18755. if (data_length > DMP_RGN23_SIZE) {
  18756. data_length = 0;
  18757. goto out;
  18758. }
  18759. lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
  18760. out:
  18761. lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
  18762. return data_length;
  18763. }
  18764. /**
  18765. * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
  18766. * @phba: pointer to lpfc hba data structure.
  18767. *
18768. * This function reads region 23 and parses the TLVs for port status to
18769. * decide whether the user disabled the port. If a TLV indicates the
18770. * port is disabled, the hba_flag is set accordingly.
  18771. **/
  18772. void
  18773. lpfc_sli_read_link_ste(struct lpfc_hba *phba)
  18774. {
  18775. uint8_t *rgn23_data = NULL;
  18776. uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
  18777. uint32_t offset = 0;
  18778. /* Get adapter Region 23 data */
  18779. rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
  18780. if (!rgn23_data)
  18781. goto out;
  18782. if (phba->sli_rev < LPFC_SLI_REV4)
  18783. data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
  18784. else {
  18785. if_type = bf_get(lpfc_sli_intf_if_type,
  18786. &phba->sli4_hba.sli_intf);
  18787. if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
  18788. goto out;
  18789. data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
  18790. }
  18791. if (!data_size)
  18792. goto out;
  18793. /* Check the region signature first */
  18794. if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
  18795. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18796. "2619 Config region 23 has bad signature\n");
  18797. goto out;
  18798. }
  18799. offset += 4;
  18800. /* Check the data structure version */
  18801. if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
  18802. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  18803. "2620 Config region 23 has bad version\n");
  18804. goto out;
  18805. }
  18806. offset += 4;
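/*
 * Each record below is assumed to be laid out as a one-byte type, a
 * one-byte length in words, then the payload; a record is skipped by
 * advancing (length * 4 + 4) bytes.
 */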
  18807. /* Parse TLV entries in the region */
  18808. while (offset < data_size) {
  18809. if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
  18810. break;
  18811. /*
  18812. * If the TLV is not driver specific TLV or driver id is
  18813. * not linux driver id, skip the record.
  18814. */
  18815. if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
  18816. (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
  18817. (rgn23_data[offset + 3] != 0)) {
  18818. offset += rgn23_data[offset + 1] * 4 + 4;
  18819. continue;
  18820. }
  18821. /* Driver found a driver specific TLV in the config region */
  18822. sub_tlv_len = rgn23_data[offset + 1] * 4;
  18823. offset += 4;
  18824. tlv_offset = 0;
  18825. /*
  18826. * Search for configured port state sub-TLV.
  18827. */
  18828. while ((offset < data_size) &&
  18829. (tlv_offset < sub_tlv_len)) {
  18830. if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
  18831. offset += 4;
  18832. tlv_offset += 4;
  18833. break;
  18834. }
  18835. if (rgn23_data[offset] != PORT_STE_TYPE) {
  18836. offset += rgn23_data[offset + 1] * 4 + 4;
  18837. tlv_offset += rgn23_data[offset + 1] * 4 + 4;
  18838. continue;
  18839. }
  18840. /* This HBA contains PORT_STE configured */
  18841. if (!rgn23_data[offset + 2])
  18842. set_bit(LINK_DISABLED, &phba->hba_flag);
  18843. goto out;
  18844. }
  18845. }
  18846. out:
  18847. kfree(rgn23_data);
  18848. return;
  18849. }
  18850. /**
  18851. * lpfc_log_fw_write_cmpl - logs firmware write completion status
  18852. * @phba: pointer to lpfc hba data structure
  18853. * @shdr_status: wr_object rsp's status field
  18854. * @shdr_add_status: wr_object rsp's add_status field
  18855. * @shdr_add_status_2: wr_object rsp's add_status_2 field
  18856. * @shdr_change_status: wr_object rsp's change_status field
  18857. * @shdr_csf: wr_object rsp's csf bit
  18858. *
  18859. * This routine is intended to be called after a firmware write completes.
18860. * It logs the next action the user must take to instantiate the newly
18861. * downloaded firmware, or the reason for an incompatibility.
  18862. **/
  18863. static void
  18864. lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
  18865. u32 shdr_add_status, u32 shdr_add_status_2,
  18866. u32 shdr_change_status, u32 shdr_csf)
  18867. {
  18868. lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
  18869. "4198 %s: flash_id x%02x, asic_rev x%02x, "
  18870. "status x%02x, add_status x%02x, add_status_2 x%02x, "
  18871. "change_status x%02x, csf %01x\n", __func__,
  18872. phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
  18873. shdr_status, shdr_add_status, shdr_add_status_2,
  18874. shdr_change_status, shdr_csf);
  18875. if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
  18876. switch (shdr_add_status_2) {
  18877. case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
  18878. lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  18879. "4199 Firmware write failed: "
  18880. "image incompatible with flash x%02x\n",
  18881. phba->sli4_hba.flash_id);
  18882. break;
  18883. case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
  18884. lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  18885. "4200 Firmware write failed: "
  18886. "image incompatible with ASIC "
  18887. "architecture x%02x\n",
  18888. phba->sli4_hba.asic_rev);
  18889. break;
  18890. default:
  18891. lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
  18892. "4210 Firmware write failed: "
  18893. "add_status_2 x%02x\n",
  18894. shdr_add_status_2);
  18895. break;
  18896. }
  18897. } else if (!shdr_status && !shdr_add_status) {
  18898. if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
  18899. shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
  18900. if (shdr_csf)
  18901. shdr_change_status =
  18902. LPFC_CHANGE_STATUS_PCI_RESET;
  18903. }
  18904. switch (shdr_change_status) {
  18905. case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
  18906. lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
  18907. "3198 Firmware write complete: System "
  18908. "reboot required to instantiate\n");
  18909. break;
  18910. case (LPFC_CHANGE_STATUS_FW_RESET):
  18911. lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
  18912. "3199 Firmware write complete: "
  18913. "Firmware reset required to "
  18914. "instantiate\n");
  18915. break;
  18916. case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
  18917. lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
  18918. "3200 Firmware write complete: Port "
  18919. "Migration or PCI Reset required to "
  18920. "instantiate\n");
  18921. break;
  18922. case (LPFC_CHANGE_STATUS_PCI_RESET):
  18923. lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
  18924. "3201 Firmware write complete: PCI "
  18925. "Reset required to instantiate\n");
  18926. break;
  18927. default:
  18928. break;
  18929. }
  18930. }
  18931. }
  18932. /**
  18933. * lpfc_wr_object - write an object to the firmware
  18934. * @phba: HBA structure that indicates port to create a queue on.
  18935. * @dmabuf_list: list of dmabufs to write to the port.
  18936. * @size: the total byte value of the objects to write to the port.
  18937. * @offset: the current offset to be used to start the transfer.
  18938. *
  18939. * This routine will create a wr_object mailbox command to send to the port.
18940. * The mailbox command will be constructed using the dma buffers described in
18941. * @dmabuf_list to create a list of BDEs. This routine will fill in as many
18942. * BDEs as the embedded mailbox can support. The @offset variable will be
  18943. * used to indicate the starting offset of the transfer and will also return
  18944. * the offset after the write object mailbox has completed. @size is used to
  18945. * determine the end of the object and whether the eof bit should be set.
  18946. *
18947. * Returns 0 if successful; @offset will contain the new offset to use
18948. * for the next write.
18949. * Returns a negative value for error cases.
  18950. **/
  18951. int
  18952. lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
  18953. uint32_t size, uint32_t *offset)
  18954. {
  18955. struct lpfc_mbx_wr_object *wr_object;
  18956. LPFC_MBOXQ_t *mbox;
  18957. int rc = 0, i = 0;
  18958. int mbox_status = 0;
  18959. uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
  18960. uint32_t shdr_change_status = 0, shdr_csf = 0;
  18961. uint32_t mbox_tmo;
  18962. struct lpfc_dmabuf *dmabuf;
  18963. uint32_t written = 0;
  18964. bool check_change_status = false;
  18965. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  18966. if (!mbox)
  18967. return -ENOMEM;
  18968. lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
  18969. LPFC_MBOX_OPCODE_WRITE_OBJECT,
  18970. sizeof(struct lpfc_mbx_wr_object) -
  18971. sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
  18972. wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
  18973. wr_object->u.request.write_offset = *offset;
  18974. sprintf((uint8_t *)wr_object->u.request.object_name, "/");
  18975. wr_object->u.request.object_name[0] =
  18976. cpu_to_le32(wr_object->u.request.object_name[0]);
  18977. bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
  18978. list_for_each_entry(dmabuf, dmabuf_list, list) {
  18979. if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
  18980. break;
  18981. wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
  18982. wr_object->u.request.bde[i].addrHigh =
  18983. putPaddrHigh(dmabuf->phys);
  18984. if (written + SLI4_PAGE_SIZE >= size) {
  18985. wr_object->u.request.bde[i].tus.f.bdeSize =
  18986. (size - written);
  18987. written += (size - written);
  18988. bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
  18989. bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
  18990. check_change_status = true;
  18991. } else {
  18992. wr_object->u.request.bde[i].tus.f.bdeSize =
  18993. SLI4_PAGE_SIZE;
  18994. written += SLI4_PAGE_SIZE;
  18995. }
  18996. i++;
  18997. }
  18998. wr_object->u.request.bde_count = i;
  18999. bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
  19000. if (!phba->sli4_hba.intr_enable)
  19001. mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
  19002. else {
  19003. mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
  19004. mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
  19005. }
  19006. /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
  19007. rc = mbox_status;
  19008. /* The IOCTL status is embedded in the mailbox subheader. */
  19009. shdr_status = bf_get(lpfc_mbox_hdr_status,
  19010. &wr_object->header.cfg_shdr.response);
  19011. shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
  19012. &wr_object->header.cfg_shdr.response);
  19013. shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
  19014. &wr_object->header.cfg_shdr.response);
  19015. if (check_change_status) {
  19016. shdr_change_status = bf_get(lpfc_wr_object_change_status,
  19017. &wr_object->u.response);
  19018. shdr_csf = bf_get(lpfc_wr_object_csf,
  19019. &wr_object->u.response);
  19020. }
  19021. if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
  19022. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19023. "3025 Write Object mailbox failed with "
  19024. "status x%x add_status x%x, add_status_2 x%x, "
  19025. "mbx status x%x\n",
  19026. shdr_status, shdr_add_status, shdr_add_status_2,
  19027. rc);
  19028. rc = -ENXIO;
  19029. *offset = shdr_add_status;
  19030. } else {
  19031. *offset += wr_object->u.response.actual_write_length;
  19032. }
  19033. if (rc || check_change_status)
  19034. lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
  19035. shdr_add_status_2, shdr_change_status,
  19036. shdr_csf);
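/*
 * Free the mailbox only when it is safe to do so: in polled mode it is
 * always owned by this routine, but in interrupt mode a timed-out
 * mailbox may still be referenced by the completion path, so it is
 * left for that path to clean up.
 */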
  19037. if (!phba->sli4_hba.intr_enable)
  19038. mempool_free(mbox, phba->mbox_mem_pool);
  19039. else if (mbox_status != MBX_TIMEOUT)
  19040. mempool_free(mbox, phba->mbox_mem_pool);
  19041. return rc;
  19042. }
  19043. /**
  19044. * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
  19045. * @vport: pointer to vport data structure.
  19046. *
19047. * This function iterates through the mailboxq and cleans up all REG_LOGIN
19048. * and REG_VPI mailbox commands associated with the vport. This function
19049. * is called when the driver wants to restart discovery of the vport due to
  19050. * a Clear Virtual Link event.
  19051. **/
  19052. void
  19053. lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
  19054. {
  19055. struct lpfc_hba *phba = vport->phba;
  19056. LPFC_MBOXQ_t *mb, *nextmb;
  19057. struct lpfc_nodelist *ndlp;
  19058. struct lpfc_nodelist *act_mbx_ndlp = NULL;
  19059. LIST_HEAD(mbox_cmd_list);
  19060. uint8_t restart_loop;
  19061. /* Clean up internally queued mailbox commands with the vport */
  19062. spin_lock_irq(&phba->hbalock);
  19063. list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
  19064. if (mb->vport != vport)
  19065. continue;
  19066. if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
  19067. (mb->u.mb.mbxCommand != MBX_REG_VPI))
  19068. continue;
  19069. list_move_tail(&mb->list, &mbox_cmd_list);
  19070. }
  19071. /* Clean up active mailbox command with the vport */
  19072. mb = phba->sli.mbox_active;
  19073. if (mb && (mb->vport == vport)) {
  19074. if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
  19075. (mb->u.mb.mbxCommand == MBX_REG_VPI))
  19076. mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  19077. if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
  19078. act_mbx_ndlp = mb->ctx_ndlp;
  19079. /* This reference is local to this routine. The
  19080. * reference is removed at routine exit.
  19081. */
  19082. act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
  19083. /* Unregister the RPI when mailbox complete */
  19084. mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
  19085. }
  19086. }
  19087. /* Cleanup any mailbox completions which are not yet processed */
  19088. do {
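/* The hbalock must be dropped to take the ndlp lock, so the
 * mboxq_cmpl walk is restarted from the top after each node whose
 * NLP_IGNR_REG_CMPL flag is cleared.
 */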
  19089. restart_loop = 0;
  19090. list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
  19091. /*
19092. * If this mailbox is already processed or it is
19093. * for another vport, ignore it.
  19094. */
  19095. if ((mb->vport != vport) ||
  19096. (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
  19097. continue;
  19098. if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
  19099. (mb->u.mb.mbxCommand != MBX_REG_VPI))
  19100. continue;
  19101. mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  19102. if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
  19103. ndlp = mb->ctx_ndlp;
  19104. /* Unregister the RPI when mailbox complete */
  19105. mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
  19106. restart_loop = 1;
  19107. spin_unlock_irq(&phba->hbalock);
  19108. spin_lock(&ndlp->lock);
  19109. ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
  19110. spin_unlock(&ndlp->lock);
  19111. spin_lock_irq(&phba->hbalock);
  19112. break;
  19113. }
  19114. }
  19115. } while (restart_loop);
  19116. spin_unlock_irq(&phba->hbalock);
  19117. /* Release the cleaned-up mailbox commands */
  19118. while (!list_empty(&mbox_cmd_list)) {
  19119. list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
  19120. if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
  19121. ndlp = mb->ctx_ndlp;
  19122. mb->ctx_ndlp = NULL;
  19123. if (ndlp) {
  19124. spin_lock(&ndlp->lock);
  19125. ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
  19126. spin_unlock(&ndlp->lock);
  19127. lpfc_nlp_put(ndlp);
  19128. }
  19129. }
  19130. lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
  19131. }
  19132. /* Release the ndlp with the cleaned-up active mailbox command */
  19133. if (act_mbx_ndlp) {
  19134. spin_lock(&act_mbx_ndlp->lock);
  19135. act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
  19136. spin_unlock(&act_mbx_ndlp->lock);
  19137. lpfc_nlp_put(act_mbx_ndlp);
  19138. }
  19139. }
  19140. /**
  19141. * lpfc_drain_txq - Drain the txq
  19142. * @phba: Pointer to HBA context object.
  19143. *
19144. * This function attempts to submit IOCBs on the txq
19145. * to the adapter. For SLI4 adapters, the txq contains
19146. * ELS IOCBs that have been deferred because there
19147. * are no free SGLs. This congestion can occur with large
  19148. * vport counts during node discovery.
  19149. **/
  19150. uint32_t
  19151. lpfc_drain_txq(struct lpfc_hba *phba)
  19152. {
  19153. LIST_HEAD(completions);
  19154. struct lpfc_sli_ring *pring;
  19155. struct lpfc_iocbq *piocbq = NULL;
  19156. unsigned long iflags = 0;
  19157. char *fail_msg = NULL;
  19158. uint32_t txq_cnt = 0;
  19159. struct lpfc_queue *wq;
  19160. int ret = 0;
  19161. if (phba->link_flag & LS_MDS_LOOPBACK) {
  19162. /* MDS WQE are posted only to first WQ*/
  19163. wq = phba->sli4_hba.hdwq[0].io_wq;
  19164. if (unlikely(!wq))
  19165. return 0;
  19166. pring = wq->pring;
  19167. } else {
  19168. wq = phba->sli4_hba.els_wq;
  19169. if (unlikely(!wq))
  19170. return 0;
  19171. pring = lpfc_phba_elsring(phba);
  19172. }
  19173. if (unlikely(!pring) || list_empty(&pring->txq))
  19174. return 0;
  19175. spin_lock_irqsave(&pring->ring_lock, iflags);
  19176. list_for_each_entry(piocbq, &pring->txq, list) {
  19177. txq_cnt++;
  19178. }
  19179. if (txq_cnt > pring->txq_max)
  19180. pring->txq_max = txq_cnt;
  19181. spin_unlock_irqrestore(&pring->ring_lock, iflags);
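/*
 * Pull deferred IOCBs off the txq one at a time and try to issue
 * them; stop early if the WQ reports busy so the remaining entries
 * stay queued for a later drain attempt.
 */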
  19182. while (!list_empty(&pring->txq)) {
  19183. spin_lock_irqsave(&pring->ring_lock, iflags);
  19184. piocbq = lpfc_sli_ringtx_get(phba, pring);
  19185. if (!piocbq) {
  19186. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19187. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19188. "2823 txq empty and txq_cnt is %d\n",
  19189. txq_cnt);
  19190. break;
  19191. }
  19192. txq_cnt--;
  19193. ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
  19194. if (ret && ret != IOCB_BUSY) {
  19195. fail_msg = " - Cannot send IO ";
  19196. piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  19197. }
  19198. if (fail_msg) {
  19199. piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
  19200. /* Failed means we can't issue and need to cancel */
  19201. lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
  19202. "2822 IOCB failed %s iotag 0x%x "
  19203. "xri 0x%x %d flg x%x\n",
  19204. fail_msg, piocbq->iotag,
  19205. piocbq->sli4_xritag, ret,
  19206. piocbq->cmd_flag);
  19207. list_add_tail(&piocbq->list, &completions);
  19208. fail_msg = NULL;
  19209. }
  19210. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19211. if (txq_cnt == 0 || ret == IOCB_BUSY)
  19212. break;
  19213. }
  19214. /* Cancel all the IOCBs that cannot be issued */
  19215. lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
  19216. IOERR_SLI_ABORTED);
  19217. return txq_cnt;
  19218. }
  19219. /**
  19220. * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
  19221. * @phba: Pointer to HBA context object.
  19222. * @pwqeq: Pointer to command WQE.
  19223. * @sglq: Pointer to the scatter gather queue object.
  19224. *
  19225. * This routine converts the bpl or bde that is in the WQE
  19226. * to a sgl list for the sli4 hardware. The physical address
  19227. * of the bpl/bde is converted back to a virtual address.
19228. * If the WQE contains a BPL then the list of BDEs is
19229. * converted to sli4_sge entries. If the WQE contains a single
19230. * BDE then it is converted to a single sli4_sge.
  19231. * The WQE is still in cpu endianness so the contents of
  19232. * the bpl can be used without byte swapping.
  19233. *
  19234. * Returns valid XRI = Success, NO_XRI = Failure.
  19235. */
  19236. static uint16_t
  19237. lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
  19238. struct lpfc_sglq *sglq)
  19239. {
  19240. uint16_t xritag = NO_XRI;
  19241. struct ulp_bde64 *bpl = NULL;
  19242. struct ulp_bde64 bde;
  19243. struct sli4_sge *sgl = NULL;
  19244. struct lpfc_dmabuf *dmabuf;
  19245. union lpfc_wqe128 *wqe;
  19246. int numBdes = 0;
  19247. int i = 0;
  19248. uint32_t offset = 0; /* accumulated offset in the sg request list */
  19249. int inbound = 0; /* number of sg reply entries inbound from firmware */
  19250. uint32_t cmd;
  19251. if (!pwqeq || !sglq)
  19252. return xritag;
  19253. sgl = (struct sli4_sge *)sglq->sgl;
  19254. wqe = &pwqeq->wqe;
  19255. pwqeq->iocb.ulpIoTag = pwqeq->iotag;
  19256. cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
  19257. if (cmd == CMD_XMIT_BLS_RSP64_WQE)
  19258. return sglq->sli4_xritag;
  19259. numBdes = pwqeq->num_bdes;
  19260. if (numBdes) {
  19261. /* The addrHigh and addrLow fields within the WQE
  19262. * have not been byteswapped yet so there is no
  19263. * need to swap them back.
  19264. */
  19265. if (pwqeq->bpl_dmabuf)
  19266. dmabuf = pwqeq->bpl_dmabuf;
  19267. else
  19268. return xritag;
  19269. bpl = (struct ulp_bde64 *)dmabuf->virt;
  19270. if (!bpl)
  19271. return xritag;
  19272. for (i = 0; i < numBdes; i++) {
  19273. /* Should already be byte swapped. */
  19274. sgl->addr_hi = bpl->addrHigh;
  19275. sgl->addr_lo = bpl->addrLow;
  19276. sgl->word2 = le32_to_cpu(sgl->word2);
  19277. if ((i+1) == numBdes)
  19278. bf_set(lpfc_sli4_sge_last, sgl, 1);
  19279. else
  19280. bf_set(lpfc_sli4_sge_last, sgl, 0);
  19281. /* swap the size field back to the cpu so we
  19282. * can assign it to the sgl.
  19283. */
  19284. bde.tus.w = le32_to_cpu(bpl->tus.w);
  19285. sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
  19286. /* The offsets in the sgl need to be accumulated
  19287. * separately for the request and reply lists.
  19288. * The request is always first, the reply follows.
  19289. */
  19290. switch (cmd) {
  19291. case CMD_GEN_REQUEST64_WQE:
  19292. /* add up the reply sg entries */
  19293. if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
  19294. inbound++;
  19295. /* first inbound? reset the offset */
  19296. if (inbound == 1)
  19297. offset = 0;
  19298. bf_set(lpfc_sli4_sge_offset, sgl, offset);
  19299. bf_set(lpfc_sli4_sge_type, sgl,
  19300. LPFC_SGE_TYPE_DATA);
  19301. offset += bde.tus.f.bdeSize;
  19302. break;
  19303. case CMD_FCP_TRSP64_WQE:
  19304. bf_set(lpfc_sli4_sge_offset, sgl, 0);
  19305. bf_set(lpfc_sli4_sge_type, sgl,
  19306. LPFC_SGE_TYPE_DATA);
  19307. break;
  19308. case CMD_FCP_TSEND64_WQE:
  19309. case CMD_FCP_TRECEIVE64_WQE:
  19310. bf_set(lpfc_sli4_sge_type, sgl,
  19311. bpl->tus.f.bdeFlags);
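/* For these target-mode WQEs the running data offset is only
 * accumulated from the fourth BDE onward; the first entries
 * presumably carry the FCP command/response buffers.
 */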
  19312. if (i < 3)
  19313. offset = 0;
  19314. else
  19315. offset += bde.tus.f.bdeSize;
  19316. bf_set(lpfc_sli4_sge_offset, sgl, offset);
  19317. break;
  19318. }
  19319. sgl->word2 = cpu_to_le32(sgl->word2);
  19320. bpl++;
  19321. sgl++;
  19322. }
  19323. } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
  19324. /* The addrHigh and addrLow fields of the BDE have not
  19325. * been byteswapped yet so they need to be swapped
  19326. * before putting them in the sgl.
  19327. */
  19328. sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
  19329. sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
  19330. sgl->word2 = le32_to_cpu(sgl->word2);
  19331. bf_set(lpfc_sli4_sge_last, sgl, 1);
  19332. sgl->word2 = cpu_to_le32(sgl->word2);
  19333. sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
  19334. }
  19335. return sglq->sli4_xritag;
  19336. }
  19337. /**
  19338. * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
  19339. * @phba: Pointer to HBA context object.
  19340. * @qp: Pointer to HDW queue.
  19341. * @pwqe: Pointer to command WQE.
  19342. **/
  19343. int
  19344. lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
  19345. struct lpfc_iocbq *pwqe)
  19346. {
  19347. union lpfc_wqe128 *wqe = &pwqe->wqe;
  19348. struct lpfc_async_xchg_ctx *ctxp;
  19349. struct lpfc_queue *wq;
  19350. struct lpfc_sglq *sglq;
  19351. struct lpfc_sli_ring *pring;
  19352. unsigned long iflags;
  19353. uint32_t ret = 0;
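/* Route the WQE by IO type: NVME LS traffic goes to the dedicated
 * nvmels_wq, fast-path FCP/NVME IO goes to this HWQ's io_wq, and
 * NVMET exchanges reuse the SGL already bound to the exchange
 * context before being posted to the io_wq.
 */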
  19354. /* NVME_LS and NVME_LS ABTS requests. */
  19355. if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
  19356. pring = phba->sli4_hba.nvmels_wq->pring;
  19357. lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
  19358. qp, wq_access);
  19359. sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
  19360. if (!sglq) {
  19361. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19362. return WQE_BUSY;
  19363. }
  19364. pwqe->sli4_lxritag = sglq->sli4_lxritag;
  19365. pwqe->sli4_xritag = sglq->sli4_xritag;
  19366. if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
  19367. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19368. return WQE_ERROR;
  19369. }
  19370. bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
  19371. pwqe->sli4_xritag);
  19372. ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
  19373. if (ret) {
  19374. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19375. return ret;
  19376. }
  19377. lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
  19378. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19379. lpfc_sli4_poll_eq(qp->hba_eq);
  19380. return 0;
  19381. }
  19382. /* NVME_FCREQ and NVME_ABTS requests */
  19383. if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
  19384. /* Get the IO distribution (hba_wqidx) for WQ assignment. */
  19385. wq = qp->io_wq;
  19386. pring = wq->pring;
  19387. bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
  19388. lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
  19389. qp, wq_access);
  19390. ret = lpfc_sli4_wq_put(wq, wqe);
  19391. if (ret) {
  19392. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19393. return ret;
  19394. }
  19395. lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
  19396. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19397. lpfc_sli4_poll_eq(qp->hba_eq);
  19398. return 0;
  19399. }
  19400. /* NVMET requests */
  19401. if (pwqe->cmd_flag & LPFC_IO_NVMET) {
  19402. /* Get the IO distribution (hba_wqidx) for WQ assignment. */
  19403. wq = qp->io_wq;
  19404. pring = wq->pring;
  19405. ctxp = pwqe->context_un.axchg;
  19406. sglq = ctxp->ctxbuf->sglq;
  19407. if (pwqe->sli4_xritag == NO_XRI) {
  19408. pwqe->sli4_lxritag = sglq->sli4_lxritag;
  19409. pwqe->sli4_xritag = sglq->sli4_xritag;
  19410. }
  19411. bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
  19412. pwqe->sli4_xritag);
  19413. bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
  19414. lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
  19415. qp, wq_access);
  19416. ret = lpfc_sli4_wq_put(wq, wqe);
  19417. if (ret) {
  19418. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19419. return ret;
  19420. }
  19421. lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
  19422. spin_unlock_irqrestore(&pring->ring_lock, iflags);
  19423. lpfc_sli4_poll_eq(qp->hba_eq);
  19424. return 0;
  19425. }
  19426. return WQE_ERROR;
  19427. }
  19428. /**
  19429. * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
  19430. * @phba: Pointer to HBA context object.
  19431. * @cmdiocb: Pointer to driver command iocb object.
  19432. * @cmpl: completion function.
  19433. *
  19434. * Fill the appropriate fields for the abort WQE and call
19435. * internal routine lpfc_sli4_issue_wqe to send the WQE.
  19436. * This function is called with hbalock held and no ring_lock held.
  19437. *
  19438. * RETURNS 0 - SUCCESS
  19439. **/
  19440. int
  19441. lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  19442. void *cmpl)
  19443. {
  19444. struct lpfc_vport *vport = cmdiocb->vport;
  19445. struct lpfc_iocbq *abtsiocb = NULL;
  19446. union lpfc_wqe128 *abtswqe;
  19447. struct lpfc_io_buf *lpfc_cmd;
  19448. int retval = IOCB_ERROR;
  19449. u16 xritag = cmdiocb->sli4_xritag;
  19450. /*
  19451. * The scsi command can not be in txq and it is in flight because the
  19452. * pCmd is still pointing at the SCSI command we have to abort. There
  19453. * is no need to search the txcmplq. Just send an abort to the FW.
  19454. */
  19455. abtsiocb = __lpfc_sli_get_iocbq(phba);
  19456. if (!abtsiocb)
  19457. return WQE_NORESOURCE;
  19458. /* Indicate the IO is being aborted by the driver. */
  19459. cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
  19460. abtswqe = &abtsiocb->wqe;
  19461. memset(abtswqe, 0, sizeof(*abtswqe));
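/* With the link down (or in external loopback) an ABTS cannot be
 * sent on the wire, so the IA bit is set below and the firmware
 * only cleans up the exchange locally.
 */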
  19462. if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
  19463. bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
  19464. bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
  19465. abtswqe->abort_cmd.rsrvd5 = 0;
  19466. abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
  19467. bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
  19468. bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
  19469. bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
  19470. bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
  19471. bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
  19472. bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
  19473. /* ABTS WQE must go to the same WQ as the WQE to be aborted */
  19474. abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
  19475. abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
  19476. if (cmdiocb->cmd_flag & LPFC_IO_FCP)
  19477. abtsiocb->cmd_flag |= LPFC_IO_FCP;
  19478. if (cmdiocb->cmd_flag & LPFC_IO_NVME)
  19479. abtsiocb->cmd_flag |= LPFC_IO_NVME;
  19480. if (cmdiocb->cmd_flag & LPFC_IO_FOF)
  19481. abtsiocb->cmd_flag |= LPFC_IO_FOF;
  19482. abtsiocb->vport = vport;
  19483. abtsiocb->cmd_cmpl = cmpl;
  19484. lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
  19485. retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
  19486. lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  19487. "0359 Abort xri x%x, original iotag x%x, "
  19488. "abort cmd iotag x%x retval x%x\n",
  19489. xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
  19490. if (retval) {
  19491. cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
  19492. __lpfc_sli_release_iocbq(phba, abtsiocb);
  19493. }
  19494. return retval;
  19495. }
  19496. #ifdef LPFC_MXP_STAT
  19497. /**
  19498. * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
  19499. * @phba: pointer to lpfc hba data structure.
  19500. * @hwqid: belong to which HWQ.
  19501. *
19502. * The purpose of this routine is to take a snapshot of the pbl, pvt and busy
19503. * counts 15 seconds after a test case starts running.
19504. *
19505. * The user should call lpfc_debugfs_multixripools_write before running a test
19506. * case to clear stat_snapshot_taken. Then the user starts a test case. While
19507. * the test case is running, stat_snapshot_taken is incremented by 1 each time
19508. * this routine is called from the heartbeat timer. When stat_snapshot_taken is
  19509. * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
  19510. **/
  19511. void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
  19512. {
  19513. struct lpfc_sli4_hdw_queue *qp;
  19514. struct lpfc_multixri_pool *multixri_pool;
  19515. struct lpfc_pvt_pool *pvt_pool;
  19516. struct lpfc_pbl_pool *pbl_pool;
  19517. u32 txcmplq_cnt;
  19518. qp = &phba->sli4_hba.hdwq[hwqid];
  19519. multixri_pool = qp->p_multixri_pool;
  19520. if (!multixri_pool)
  19521. return;
  19522. if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
  19523. pvt_pool = &qp->p_multixri_pool->pvt_pool;
  19524. pbl_pool = &qp->p_multixri_pool->pbl_pool;
  19525. txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
  19526. multixri_pool->stat_pbl_count = pbl_pool->count;
  19527. multixri_pool->stat_pvt_count = pvt_pool->count;
  19528. multixri_pool->stat_busy_count = txcmplq_cnt;
  19529. }
  19530. multixri_pool->stat_snapshot_taken++;
  19531. }
  19532. #endif
  19533. /**
  19534. * lpfc_adjust_pvt_pool_count - Adjust private pool count
  19535. * @phba: pointer to lpfc hba data structure.
  19536. * @hwqid: belong to which HWQ.
  19537. *
  19538. * This routine moves some XRIs from private to public pool when private pool
  19539. * is not busy.
  19540. **/
  19541. void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
  19542. {
  19543. struct lpfc_multixri_pool *multixri_pool;
  19544. u32 io_req_count;
  19545. u32 prev_io_req_count;
  19546. multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
  19547. if (!multixri_pool)
  19548. return;
  19549. io_req_count = multixri_pool->io_req_count;
  19550. prev_io_req_count = multixri_pool->prev_io_req_count;
  19551. if (prev_io_req_count != io_req_count) {
  19552. /* Private pool is busy */
  19553. multixri_pool->prev_io_req_count = io_req_count;
  19554. } else {
  19555. /* Private pool is not busy.
  19556. * Move XRIs from private to public pool.
  19557. */
  19558. lpfc_move_xri_pvt_to_pbl(phba, hwqid);
  19559. }
  19560. }
  19561. /**
  19562. * lpfc_adjust_high_watermark - Adjust high watermark
  19563. * @phba: pointer to lpfc hba data structure.
  19564. * @hwqid: belong to which HWQ.
  19565. *
19566. * This routine sets the high watermark to the number of outstanding XRIs,
19567. * but makes sure the new value is between xri_limit/2 and xri_limit.
  19568. **/
  19569. void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
  19570. {
  19571. u32 new_watermark;
  19572. u32 watermark_max;
  19573. u32 watermark_min;
  19574. u32 xri_limit;
  19575. u32 txcmplq_cnt;
  19576. u32 abts_io_bufs;
  19577. struct lpfc_multixri_pool *multixri_pool;
  19578. struct lpfc_sli4_hdw_queue *qp;
  19579. qp = &phba->sli4_hba.hdwq[hwqid];
  19580. multixri_pool = qp->p_multixri_pool;
  19581. if (!multixri_pool)
  19582. return;
  19583. xri_limit = multixri_pool->xri_limit;
  19584. watermark_max = xri_limit;
  19585. watermark_min = xri_limit / 2;
  19586. txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
  19587. abts_io_bufs = qp->abts_scsi_io_bufs;
  19588. abts_io_bufs += qp->abts_nvme_io_bufs;
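/* The new watermark is the number of currently outstanding IOs
 * (active plus aborting), clamped to [xri_limit/2, xri_limit].
 */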
  19589. new_watermark = txcmplq_cnt + abts_io_bufs;
  19590. new_watermark = min(watermark_max, new_watermark);
  19591. new_watermark = max(watermark_min, new_watermark);
  19592. multixri_pool->pvt_pool.high_watermark = new_watermark;
  19593. #ifdef LPFC_MXP_STAT
  19594. multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
  19595. new_watermark);
  19596. #endif
  19597. }
  19598. /**
  19599. * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
  19600. * @phba: pointer to lpfc hba data structure.
  19601. * @hwqid: belong to which HWQ.
  19602. *
19603. * This routine is called from the heartbeat timer when pvt_pool is idle.
19604. * All free XRIs are moved from the private to the public pool on hwqid in 2 steps.
19605. * The first step moves (all - low_watermark) XRIs.
19606. * The second step moves the rest of the XRIs.
  19607. **/
  19608. void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
  19609. {
  19610. struct lpfc_pbl_pool *pbl_pool;
  19611. struct lpfc_pvt_pool *pvt_pool;
  19612. struct lpfc_sli4_hdw_queue *qp;
  19613. struct lpfc_io_buf *lpfc_ncmd;
  19614. struct lpfc_io_buf *lpfc_ncmd_next;
  19615. unsigned long iflag;
  19616. struct list_head tmp_list;
  19617. u32 tmp_count;
  19618. qp = &phba->sli4_hba.hdwq[hwqid];
  19619. pbl_pool = &qp->p_multixri_pool->pbl_pool;
  19620. pvt_pool = &qp->p_multixri_pool->pvt_pool;
  19621. tmp_count = 0;
  19622. lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
  19623. lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
  19624. if (pvt_pool->count > pvt_pool->low_watermark) {
  19625. /* Step 1: move (all - low_watermark) from pvt_pool
  19626. * to pbl_pool
  19627. */
  19628. /* Move low watermark of bufs from pvt_pool to tmp_list */
  19629. INIT_LIST_HEAD(&tmp_list);
  19630. list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  19631. &pvt_pool->list, list) {
  19632. list_move_tail(&lpfc_ncmd->list, &tmp_list);
  19633. tmp_count++;
  19634. if (tmp_count >= pvt_pool->low_watermark)
  19635. break;
  19636. }
  19637. /* Move all bufs from pvt_pool to pbl_pool */
  19638. list_splice_init(&pvt_pool->list, &pbl_pool->list);
  19639. /* Move all bufs from tmp_list to pvt_pool */
  19640. list_splice(&tmp_list, &pvt_pool->list);
  19641. pbl_pool->count += (pvt_pool->count - tmp_count);
  19642. pvt_pool->count = tmp_count;
  19643. } else {
  19644. /* Step 2: move the rest from pvt_pool to pbl_pool */
  19645. list_splice_init(&pvt_pool->list, &pbl_pool->list);
  19646. pbl_pool->count += pvt_pool->count;
  19647. pvt_pool->count = 0;
  19648. }
  19649. spin_unlock(&pvt_pool->lock);
  19650. spin_unlock_irqrestore(&pbl_pool->lock, iflag);
  19651. }
  19652. /**
  19653. * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
  19654. * @phba: pointer to lpfc hba data structure
  19655. * @qp: pointer to HDW queue
  19656. * @pbl_pool: specified public free XRI pool
  19657. * @pvt_pool: specified private free XRI pool
  19658. * @count: number of XRIs to move
  19659. *
  19660. * This routine tries to move some free common bufs from the specified pbl_pool
19661. * to the specified pvt_pool. It might move fewer than count XRIs if there are
19662. * not enough in the public pool.
  19663. *
  19664. * Return:
  19665. * true - if XRIs are successfully moved from the specified pbl_pool to the
  19666. * specified pvt_pool
  19667. * false - if the specified pbl_pool is empty or locked by someone else
  19668. **/
  19669. static bool
  19670. _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
  19671. struct lpfc_pbl_pool *pbl_pool,
  19672. struct lpfc_pvt_pool *pvt_pool, u32 count)
  19673. {
  19674. struct lpfc_io_buf *lpfc_ncmd;
  19675. struct lpfc_io_buf *lpfc_ncmd_next;
  19676. unsigned long iflag;
  19677. int ret;
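/* Only trylock the public pool: if another CPU holds it, report
 * failure and let the caller move on to the next HWQ's pool instead
 * of spinning here.
 */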
  19678. ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
  19679. if (ret) {
  19680. if (pbl_pool->count) {
  19681. /* Move a batch of XRIs from public to private pool */
  19682. lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
  19683. list_for_each_entry_safe(lpfc_ncmd,
  19684. lpfc_ncmd_next,
  19685. &pbl_pool->list,
  19686. list) {
  19687. list_move_tail(&lpfc_ncmd->list,
  19688. &pvt_pool->list);
  19689. pvt_pool->count++;
  19690. pbl_pool->count--;
  19691. count--;
  19692. if (count == 0)
  19693. break;
  19694. }
  19695. spin_unlock(&pvt_pool->lock);
  19696. spin_unlock_irqrestore(&pbl_pool->lock, iflag);
  19697. return true;
  19698. }
  19699. spin_unlock_irqrestore(&pbl_pool->lock, iflag);
  19700. }
  19701. return false;
  19702. }
  19703. /**
  19704. * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
  19705. * @phba: pointer to lpfc hba data structure.
  19706. * @hwqid: belong to which HWQ.
  19707. * @count: number of XRIs to move
  19708. *
19709. * This routine tries to find some free common bufs in one of the public pools
19710. * using a round-robin method. The search starts from the local hwqid, then from
19711. * the HWQ found last time (rrb_next_hwqid). Once a non-empty public pool is
19712. * found, a batch of free common bufs is moved to the private pool on hwqid.
19713. * It might move fewer than count XRIs if there are not enough in the public pools.
  19714. **/
  19715. void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
  19716. {
  19717. struct lpfc_multixri_pool *multixri_pool;
  19718. struct lpfc_multixri_pool *next_multixri_pool;
  19719. struct lpfc_pvt_pool *pvt_pool;
  19720. struct lpfc_pbl_pool *pbl_pool;
  19721. struct lpfc_sli4_hdw_queue *qp;
  19722. u32 next_hwqid;
  19723. u32 hwq_count;
  19724. int ret;
  19725. qp = &phba->sli4_hba.hdwq[hwqid];
  19726. multixri_pool = qp->p_multixri_pool;
  19727. pvt_pool = &multixri_pool->pvt_pool;
  19728. pbl_pool = &multixri_pool->pbl_pool;
  19729. /* Check if local pbl_pool is available */
  19730. ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
  19731. if (ret) {
  19732. #ifdef LPFC_MXP_STAT
  19733. multixri_pool->local_pbl_hit_count++;
  19734. #endif
  19735. return;
  19736. }
  19737. hwq_count = phba->cfg_hdw_queue;
  19738. /* Get the next hwqid which was found last time */
  19739. next_hwqid = multixri_pool->rrb_next_hwqid;
  19740. do {
  19741. /* Go to next hwq */
  19742. next_hwqid = (next_hwqid + 1) % hwq_count;
  19743. next_multixri_pool =
  19744. phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
  19745. pbl_pool = &next_multixri_pool->pbl_pool;
  19746. /* Check if the public free xri pool is available */
  19747. ret = _lpfc_move_xri_pbl_to_pvt(
  19748. phba, qp, pbl_pool, pvt_pool, count);
  19749. /* Exit while-loop if success or all hwqid are checked */
  19750. } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
  19751. /* Starting point for the next time */
  19752. multixri_pool->rrb_next_hwqid = next_hwqid;
  19753. if (!ret) {
  19754. /* stats: all public pools are empty*/
  19755. multixri_pool->pbl_empty_count++;
  19756. }
  19757. #ifdef LPFC_MXP_STAT
  19758. if (ret) {
  19759. if (next_hwqid == hwqid)
  19760. multixri_pool->local_pbl_hit_count++;
  19761. else
  19762. multixri_pool->other_pbl_hit_count++;
  19763. }
  19764. #endif
  19765. }
  19766. /**
  19767. * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
  19768. * @phba: pointer to lpfc hba data structure.
  19769. * @hwqid: belong to which HWQ.
  19770. *
19771. * This routine gets a batch of XRIs from pbl_pool if pvt_pool has fewer
19772. * entries than its low watermark.
  19773. **/
  19774. void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
  19775. {
  19776. struct lpfc_multixri_pool *multixri_pool;
  19777. struct lpfc_pvt_pool *pvt_pool;
  19778. multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
  19779. pvt_pool = &multixri_pool->pvt_pool;
  19780. if (pvt_pool->count < pvt_pool->low_watermark)
  19781. lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
  19782. }
  19783. /**
  19784. * lpfc_release_io_buf - Return one IO buf back to free pool
  19785. * @phba: pointer to lpfc hba data structure.
  19786. * @lpfc_ncmd: IO buf to be returned.
  19787. * @qp: belong to which HWQ.
  19788. *
  19789. * This routine returns one IO buf back to free pool. If this is an urgent IO,
  19790. * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
  19791. * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
  19792. * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
  19793. * lpfc_io_buf_list_put.
  19794. **/
  19795. void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
  19796. struct lpfc_sli4_hdw_queue *qp)
  19797. {
  19798. unsigned long iflag;
  19799. struct lpfc_pbl_pool *pbl_pool;
  19800. struct lpfc_pvt_pool *pvt_pool;
  19801. struct lpfc_epd_pool *epd_pool;
  19802. u32 txcmplq_cnt;
  19803. u32 xri_owned;
  19804. u32 xri_limit;
  19805. u32 abts_io_bufs;
  19806. /* MUST zero fields if buffer is reused by another protocol */
  19807. lpfc_ncmd->nvmeCmd = NULL;
  19808. lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
  19809. if (phba->cfg_xpsgl && !phba->nvmet_support &&
  19810. !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
  19811. lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
  19812. if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
  19813. lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
  19814. if (phba->cfg_xri_rebalancing) {
  19815. if (lpfc_ncmd->expedite) {
  19816. /* Return to expedite pool */
  19817. epd_pool = &phba->epd_pool;
  19818. spin_lock_irqsave(&epd_pool->lock, iflag);
  19819. list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
  19820. epd_pool->count++;
  19821. spin_unlock_irqrestore(&epd_pool->lock, iflag);
  19822. return;
  19823. }
  19824. /* Avoid invalid access if an IO sneaks in and is being rejected
  19825. * just _after_ xri pools are destroyed in lpfc_offline.
  19826. * Nothing much can be done at this point.
  19827. */
  19828. if (!qp->p_multixri_pool)
  19829. return;
  19830. pbl_pool = &qp->p_multixri_pool->pbl_pool;
  19831. pvt_pool = &qp->p_multixri_pool->pvt_pool;
  19832. txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
  19833. abts_io_bufs = qp->abts_scsi_io_bufs;
  19834. abts_io_bufs += qp->abts_nvme_io_bufs;
  19835. xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
  19836. xri_limit = qp->p_multixri_pool->xri_limit;
  19837. #ifdef LPFC_MXP_STAT
  19838. if (xri_owned <= xri_limit)
  19839. qp->p_multixri_pool->below_limit_count++;
  19840. else
  19841. qp->p_multixri_pool->above_limit_count++;
  19842. #endif
  19843. /* XRI goes to either public or private free xri pool
  19844. * based on watermark and xri_limit
  19845. */
  19846. if ((pvt_pool->count < pvt_pool->low_watermark) ||
  19847. (xri_owned < xri_limit &&
  19848. pvt_pool->count < pvt_pool->high_watermark)) {
  19849. lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
  19850. qp, free_pvt_pool);
  19851. list_add_tail(&lpfc_ncmd->list,
  19852. &pvt_pool->list);
  19853. pvt_pool->count++;
  19854. spin_unlock_irqrestore(&pvt_pool->lock, iflag);
  19855. } else {
  19856. lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
  19857. qp, free_pub_pool);
  19858. list_add_tail(&lpfc_ncmd->list,
  19859. &pbl_pool->list);
  19860. pbl_pool->count++;
  19861. spin_unlock_irqrestore(&pbl_pool->lock, iflag);
  19862. }
  19863. } else {
  19864. lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
  19865. qp, free_xri);
  19866. list_add_tail(&lpfc_ncmd->list,
  19867. &qp->lpfc_io_buf_list_put);
  19868. qp->put_io_bufs++;
  19869. spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
  19870. iflag);
  19871. }
  19872. }
  19873. /**
  19874. * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
  19875. * @phba: pointer to lpfc hba data structure.
  19876. * @qp: pointer to HDW queue
  19877. * @pvt_pool: pointer to private pool data structure.
  19878. * @ndlp: pointer to lpfc nodelist data structure.
  19879. *
  19880. * This routine tries to get one free IO buf from private pool.
  19881. *
  19882. * Return:
  19883. * pointer to one free IO buf - if private pool is not empty
  19884. * NULL - if private pool is empty
  19885. **/
  19886. static struct lpfc_io_buf *
  19887. lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
  19888. struct lpfc_sli4_hdw_queue *qp,
  19889. struct lpfc_pvt_pool *pvt_pool,
  19890. struct lpfc_nodelist *ndlp)
  19891. {
  19892. struct lpfc_io_buf *lpfc_ncmd;
  19893. struct lpfc_io_buf *lpfc_ncmd_next;
  19894. unsigned long iflag;
  19895. lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
  19896. list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  19897. &pvt_pool->list, list) {
  19898. if (lpfc_test_rrq_active(
  19899. phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
  19900. continue;
  19901. list_del(&lpfc_ncmd->list);
  19902. pvt_pool->count--;
  19903. spin_unlock_irqrestore(&pvt_pool->lock, iflag);
  19904. return lpfc_ncmd;
  19905. }
  19906. spin_unlock_irqrestore(&pvt_pool->lock, iflag);
  19907. return NULL;
  19908. }
  19909. /**
  19910. * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
  19911. * @phba: pointer to lpfc hba data structure.
  19912. *
  19913. * This routine tries to get one free IO buf from expedite pool.
  19914. *
  19915. * Return:
  19916. * pointer to one free IO buf - if expedite pool is not empty
  19917. * NULL - if expedite pool is empty
  19918. **/
  19919. static struct lpfc_io_buf *
  19920. lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
  19921. {
  19922. struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
  19923. struct lpfc_io_buf *lpfc_ncmd_next;
  19924. unsigned long iflag;
  19925. struct lpfc_epd_pool *epd_pool;
  19926. epd_pool = &phba->epd_pool;
  19927. spin_lock_irqsave(&epd_pool->lock, iflag);
  19928. if (epd_pool->count > 0) {
  19929. list_for_each_entry_safe(iter, lpfc_ncmd_next,
  19930. &epd_pool->list, list) {
  19931. list_del(&iter->list);
  19932. epd_pool->count--;
  19933. lpfc_ncmd = iter;
  19934. break;
  19935. }
  19936. }
  19937. spin_unlock_irqrestore(&epd_pool->lock, iflag);
  19938. return lpfc_ncmd;
  19939. }
  19940. /**
  19941. * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
  19942. * @phba: pointer to lpfc hba data structure.
  19943. * @ndlp: pointer to lpfc nodelist data structure.
  19944. * @hwqid: belong to which HWQ
  19945. * @expedite: 1 means this request is urgent.
  19946. *
  19947. * This routine will do the following actions and then return a pointer to
  19948. * one free IO buf.
  19949. *
  19950. * 1. If private free xri count is empty, move some XRIs from public to
  19951. * private pool.
  19952. * 2. Get one XRI from private free xri pool.
  19953. * 3. If we fail to get one from pvt_pool and this is an expedite request,
  19954. * get one free xri from expedite pool.
  19955. *
  19956. * Note: ndlp is only used on SCSI side for RRQ testing.
  19957. * The caller should pass NULL for ndlp on NVME side.
  19958. *
  19959. * Return:
  19960. * pointer to one free IO buf - if private pool is not empty
  19961. * NULL - if private pool is empty
  19962. **/
  19963. static struct lpfc_io_buf *
  19964. lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
  19965. struct lpfc_nodelist *ndlp,
  19966. int hwqid, int expedite)
  19967. {
  19968. struct lpfc_sli4_hdw_queue *qp;
  19969. struct lpfc_multixri_pool *multixri_pool;
  19970. struct lpfc_pvt_pool *pvt_pool;
  19971. struct lpfc_io_buf *lpfc_ncmd;
  19972. qp = &phba->sli4_hba.hdwq[hwqid];
  19973. lpfc_ncmd = NULL;
  19974. if (!qp) {
  19975. lpfc_printf_log(phba, KERN_INFO,
  19976. LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  19977. "5556 NULL qp for hwqid x%x\n", hwqid);
  19978. return lpfc_ncmd;
  19979. }
  19980. multixri_pool = qp->p_multixri_pool;
  19981. if (!multixri_pool) {
  19982. lpfc_printf_log(phba, KERN_INFO,
  19983. LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  19984. "5557 NULL multixri for hwqid x%x\n", hwqid);
  19985. return lpfc_ncmd;
  19986. }
  19987. pvt_pool = &multixri_pool->pvt_pool;
  19988. if (!pvt_pool) {
  19989. lpfc_printf_log(phba, KERN_INFO,
  19990. LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
  19991. "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
  19992. return lpfc_ncmd;
  19993. }
  19994. multixri_pool->io_req_count++;
  19995. /* If pvt_pool is empty, move some XRIs from public to private pool */
  19996. if (pvt_pool->count == 0)
  19997. lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
  19998. /* Get one XRI from private free xri pool */
  19999. lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
  20000. if (lpfc_ncmd) {
  20001. lpfc_ncmd->hdwq = qp;
  20002. lpfc_ncmd->hdwq_no = hwqid;
  20003. } else if (expedite) {
  20004. /* If we fail to get one from pvt_pool and this is an expedite
  20005. * request, get one free xri from expedite pool.
  20006. */
  20007. lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
  20008. }
  20009. return lpfc_ncmd;
  20010. }
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
 * removes an IO buffer from the head of the hdwq's io_buf_list and returns it
 * to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;
	if (!qp) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
				"5555 NULL qp for hwqid x%x\n", hwqid);
		return lpfc_cmd;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}

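/*
 * Editor's illustration (not part of the driver): a minimal sketch of how a
 * caller might obtain and release an IO buffer around lpfc_get_io_buf(),
 * assuming the driver's existing lpfc_release_io_buf() helper. The function
 * name and surrounding flow here are hypothetical.
 */
#if 0
static int lpfc_example_issue_io(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_io_buf *lpfc_cmd;

	/* Non-expedite request; NVME-side callers pass NULL for ndlp */
	lpfc_cmd = lpfc_get_io_buf(phba, NULL, hwqid, 0);
	if (!lpfc_cmd)
		return -EBUSY;

	/* ... build the WQE in lpfc_cmd->cur_iocbq and submit it ... */

	/* On completion (or failure to submit), return the buffer */
	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
	return 0;
}
#endif
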
/**
 * lpfc_read_object - Retrieve object data from HBA
 * @phba: The HBA for which this call is being executed.
 * @rdobject: Pathname of object data we want to read.
 * @datap: Pointer to where data will be copied to.
 * @datasz: size of data area
 *
 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
 * The data will be truncated if datasz is not large enough.
 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
 *
 * Returns the actual bytes read from the object.
 *
 * This routine is hard coded to use a poll completion. Unlike other
 * sli4_config mailboxes, it uses lpfc_mbuf memory which is not
 * cleaned up in lpfc_sli4_cmd_mbox_free. If this routine is modified
 * to use interrupt-based completions, code is needed to fully cleanup
 * the memory.
 */
int
lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
		 uint32_t datasz)
{
	struct lpfc_mbx_read_object *read_object;
	LPFC_MBOXQ_t *mbox;
	int rc, length, eof, j, byte_cnt = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *pcmd;
	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_read_object) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_READ_OBJECT,
			 length, LPFC_SLI4_MBX_EMBED);
	read_object = &mbox->u.mqe.un.read_object;
	shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
	bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
	read_object->u.request.rd_object_offset = 0;
	read_object->u.request.rd_object_cnt = 1;

	memset((void *)read_object->u.request.rd_object_name, 0,
	       LPFC_OBJ_NAME_SZ);
	scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
	for (j = 0; j < strlen(rdobject); j++)
		read_object->u.request.rd_object_name[j] =
			cpu_to_le32(rd_object_name[j]);

	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt) {
		kfree(pcmd);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
	read_object->u.request.rd_object_hbuf[0].pa_lo =
		putPaddrLow(pcmd->phys);
	read_object->u.request.rd_object_hbuf[0].pa_hi =
		putPaddrHigh(pcmd->phys);
	read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_ndlp = NULL;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (shdr_status == STATUS_FAILED &&
	    shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"4674 No port cfg file in FW.\n");
		byte_cnt = -ENOENT;
	} else if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"2625 READ_OBJECT mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		byte_cnt = -ENXIO;
	} else {
		/* Success */
		length = read_object->u.response.rd_object_actual_rlen;
		eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
				"2626 READ_OBJECT Success len %d:%d, EOF %d\n",
				length, datasz, eof);

		/* Detect the port config file exists but is empty */
		if (!length && eof) {
			byte_cnt = 0;
			goto exit;
		}

		byte_cnt = length;
		lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
	}

 exit:
	/* This is an embedded SLI4 mailbox with an external buffer allocated.
	 * Free the pcmd and then cleanup with the correct routine.
	 */
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return byte_cnt;
}

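/*
 * Editor's illustration (not part of the driver): a minimal sketch of reading
 * a small object from the adapter with lpfc_read_object(). The object
 * pathname and function name below are hypothetical; real callers pass their
 * own name and size their buffer at no more than LPFC_BPL_SIZE bytes.
 */
#if 0
static int lpfc_example_read_small_object(struct lpfc_hba *phba)
{
	u32 data[64];	/* 256 bytes, well under the LPFC_BPL_SIZE limit */
	int len;

	len = lpfc_read_object(phba, (char *)"/cfg/example.cfg",
			       data, sizeof(data));
	if (len < 0)
		return len;	/* -ENOENT: no such object, -ENXIO: mbox error */

	/* 'len' bytes of object data are now in 'data' */
	return 0;
}
#endif
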
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}

/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

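/*
 * Editor's illustration (not part of the driver): a minimal sketch of the
 * get/put pairing for the per-hdwq SGL chunk pool described above.
 * lpfc_buf->hdwq must already be set; the helper name is hypothetical.
 */
#if 0
static int lpfc_example_extra_sgl(struct lpfc_hba *phba,
				  struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *sgl;

	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
	if (!sgl)
		return -ENOMEM;

	/* ... chain sgl->dma_sgl / sgl->dma_phys_sgl into the IO's SGL ... */

	/* When the IO completes, return all extra chunks to the hdwq pool */
	return lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
}
#endif
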
/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of hdwq SGL chunk pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		list_del(&list_entry->list_node);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
						GFP_ATOMIC,
						&tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd32));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into the hdwq's CMD/RSP buffer pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

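/*
 * Editor's illustration (not part of the driver): a minimal sketch of the
 * get/put pairing for the per-hdwq FCP CMD/RSP buffer pool. lpfc_buf->hdwq
 * must already be set; the helper name is hypothetical.
 */
#if 0
static int lpfc_example_cmd_rsp(struct lpfc_hba *phba,
				struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *buf;

	buf = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);
	if (!buf)
		return -ENOMEM;

	/* buf->fcp_cmnd and buf->fcp_rsp share one DMA allocation;
	 * buf->fcp_cmd_rsp_dma_handle is the bus address of fcp_cmnd.
	 */

	/* When the IO completes, return the buffer to the hdwq pool */
	return lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);
}
#endif
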
/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		list_del(&list_entry->list_node);
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
 * @phba: phba object
 * @job: job entry of the command to be posted.
 *
 * Fill in the common fields of the WQE for the command to be posted.
 *
 * Return codes:
 *   None
 **/
void
lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
{
	u8 cmnd;
	u32 *pcmd;
	u32 if_type = 0;
	u32 abort_tag;
	bool fip;
	struct lpfc_nodelist *ndlp = NULL;
	union lpfc_wqe128 *wqe = &job->wqe;
	u8 command_type = ELS_COMMAND_NON_FIP;

	fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
	/* The fcp commands will set command type */
	if (job->cmd_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	abort_tag = job->iotag;
	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);

	switch (cmnd) {
	case CMD_ELS_REQUEST64_WQE:
		ndlp = job->ndlp;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			pcmd = (u32 *)job->cmd_dmabuf->virt;
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				     *pcmd == ELS_CMD_SCR ||
				     *pcmd == ELS_CMD_RDF ||
				     *pcmd == ELS_CMD_EDC ||
				     *pcmd == ELS_CMD_RSCN_XMT ||
				     *pcmd == ELS_CMD_FDISC ||
				     *pcmd == ELS_CMD_LOGO ||
				     *pcmd == ELS_CMD_QFPA ||
				     *pcmd == ELS_CMD_UVEM ||
				     *pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
				       job->vport->fc_myDID);

				if ((*pcmd == ELS_CMD_FLOGI) &&
				    !(phba->fc_topology ==
				      LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);

				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
			} else if (pcmd) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}

		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		break;
	case CMD_XMIT_ELS_RSP64_WQE:
		ndlp = job->ndlp;

		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				       job->vport->fc_myDID);
				if (job->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
					       &wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}

		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
			       job->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_GEN_REQUEST64_WQE:
		/* Word 10 */
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_SEQUENCE64_WQE:
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);

		wqe->xmit_sequence.rsvd3 = 0;
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BLS_RSP64_WQE:
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		break;
	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
	case CMD_ABORT_XRI_WQE:		/* abort iotag */
	case CMD_SEND_FRAME:		/* mds loopback */
		/* cases already formatted for sli4 wqe - no chgs necessary */
		return;
	default:
		dump_stack();
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6207 Invalid command 0x%x\n",
				cmnd);
		break;
	}

	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}
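
/*
 * Editor's illustration (not part of the driver): a minimal sketch of how a
 * caller might rely on lpfc_sli_prep_wqe() to fill in the common WQE fields.
 * The allocation and command-specific setup here are abbreviated and
 * hypothetical; lpfc_sli_prep_wqe() only dispatches on the wqe_cmnd field
 * already set in the WQE.
 */
#if 0
static struct lpfc_iocbq *
lpfc_example_prep_gen_req(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *job;
	union lpfc_wqe128 *wqe;

	job = lpfc_sli_get_iocbq(phba);
	if (!job)
		return NULL;

	wqe = &job->wqe;
	/* Select the command; lpfc_sli_prep_wqe() switches on this field */
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);

	/* ... fill in the BDE, payload length, timeout, etc. ... */

	/* Fill the common words (dbde, iod, qosd, lenloc, cqid, ...) */
	lpfc_sli_prep_wqe(phba, job);
	return job;
}
#endif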