lpfc_init.c

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/bitops.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
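
/*
 * Illustrative sketch (not part of the original file): lpfc_hba_index above
 * is an IDR from which each HBA instance can be handed a unique board
 * number. A minimal allocate/release pair using the stock <linux/idr.h>
 * API might look like the helpers below; the function names are
 * hypothetical, made up for illustration only.
 */
#ifdef LPFC_IDR_USAGE_SKETCH
static int lpfc_sketch_get_instance(void)
{
	/* idr_alloc() returns a unique id >= 0, or a negative errno. */
	return idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
}

static void lpfc_sketch_put_instance(int instance)
{
	idr_remove(&lpfc_hba_index, instance);
}
#endif /* LPFC_IDR_USAGE_SKETCH */
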
#define LPFC_NVMET_BUF_POST 254
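
/*
 * Caller-side sketch (illustration only, an assumption rather than the
 * driver's actual control flow): one way a caller could honor the
 * lpfc_config_port_prep() return-code contract documented in the kernel-doc
 * below. The bounded-retry policy and the helper name are hypothetical;
 * lpfc_sli_brdrestart() is the driver's real HBA restart routine, but the
 * real retry logic in the setup path differs in detail.
 */
#ifdef LPFC_CONFIG_PORT_PREP_SKETCH
static int lpfc_sketch_prep_with_retry(struct lpfc_hba *phba)
{
	int rc, tries;

	for (tries = 0; tries < 3; tries++) {
		rc = lpfc_config_port_prep(phba);
		if (rc != -ERESTART)
			return rc;	/* 0 on success, other errno on failure */
		/* -ERESTART asks the SLI layer to reset the HBA and retry. */
		lpfc_sli_brdrestart(phba);
	}
	return rc;
}
#endif /* LPFC_CONFIG_PORT_PREP_SKETCH */
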
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
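
/*
 * Illustrative sketch (not driver code): the VPD loop above reads the VPD
 * region in mailbox-sized chunks. A hypothetical caller that only needs the
 * parsed VPD would simply invoke the routine and check the return code:
 *
 *	if (lpfc_config_port_prep(phba) == -ERESTART)
 *		... ask the SLI layer to reset the HBA and retry ...
 *
 * The chunking itself is driven entirely by word_cnt: the adapter reports
 * how much data it copied into the DMP_RSP_OFFSET area of the mailbox, and
 * the loop stops on a zero count, a mailbox error, or when DMP_VPD_SIZE
 * bytes have been accumulated.
 */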
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it sets the internal async event support flag to 1; otherwise, it sets the
 * flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
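
/*
 * Worked example (illustration only; field layout per struct prog_id): if
 * the wakeup-parameter word decodes to ver=10, rev=7, lev=0, dist=1 ('a')
 * and num=2, the snprintf above produces "10.70a2"; with dist=3 and num=0
 * the dist/num suffix is dropped and the result is just "10.70".
 */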
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
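
/*
 * Note (summary of the logic above, not new behavior): the service-parameter
 * port name is adopted when any of the following hold - the current
 * fc_portname is still empty, a user-configured soft WWPN exists, the
 * vendor version key signals a fabric-assigned WWPN (FAPWWN_KEY_VENDOR),
 * or FAWWPN_SET was latched earlier. Only when none of these apply does an
 * already-established fc_portname win and get copied back into the service
 * parameters.
 */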
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
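
	/*
	 * Illustration (not new logic): the loop above is a hand-rolled
	 * nibble-to-ASCII conversion over the 6-byte IEEE portion of the
	 * WWNN, producing a 12-character serial number. For example, a
	 * byte of 0x3a yields '3' (0x30 + 3) for the high nibble and 'a'
	 * (0x61 + (10 - 10)) for the low nibble.
	 */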
	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
					KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
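
/*
 * Illustration (not new logic): the long condition above simply requires
 * that a user-requested fixed speed be present in the link-module-type
 * capability mask read back by READ_CONFIG, e.g. a request for
 * LPFC_USER_LINK_SPEED_16G is honored only when LMT_16Gb is set in
 * phba->lmt. Any unsupported request falls back to
 * LPFC_USER_LINK_SPEED_AUTO rather than failing initialization.
 */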
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
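
/*
 * Sketch (illustration only): both arms of the switch above rely on the
 * cq_event member being embedded in a larger object, so container_of()
 * recovers the enclosing structure from the queued list node, e.g.:
 *
 *	struct lpfc_iocbq *iocbq =
 *		container_of(cq_event, struct lpfc_iocbq, cq_event);
 *
 * The CQE code stored in the event decides which enclosing type is valid.
 */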
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
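
/*
 * Note (illustration only): both branches above use the same splice-then-
 * cancel idiom - the txcmplq is moved onto a private list while the
 * appropriate lock is held, the lock is dropped, and only then is each
 * IOCB completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. This keeps
 * the per-iocb completion callbacks from running under the ring lock.
 */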
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* hbalock required for lpfc_els_sgl_list and scsi_buf_list */
	spin_lock_irq(&phba->hbalock);

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
				 &aborts);
		spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
				 &nvme_aborts);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	}

	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		cnt = 0;
		list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
		phba->put_nvme_bufs += cnt;
		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);

		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return 0;
}
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}
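
/*
 * Timing note (summary, not new logic): the heartbeat runs as a two-state
 * machine. While idle, the timer rearms every LPFC_HB_MBOX_INTERVAL (5 s);
 * once a heartbeat mailbox is issued, hb_outstanding is set and the timer
 * is stretched to LPFC_HB_MBOX_TIMEOUT (30 s). The completion above clears
 * hb_outstanding and drops back to the short interval; an expiry with
 * hb_outstanding still set is handled by lpfc_hb_timeout_handler below.
 */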
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);
	struct lpfc_queue *qp;
	unsigned long time_elapsed;
	uint32_t tick_cqe, max_cqe, val;
	uint64_t tot, data1, data2, data3;
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_register reg_data;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_num_disc_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->cfg_auto_imax) {
		if (!phba->last_eqdelay_time) {
			phba->last_eqdelay_time = jiffies;
			goto skip_eqdelay;
		}
		time_elapsed = jiffies - phba->last_eqdelay_time;
		phba->last_eqdelay_time = jiffies;

		tot = 0xffff;
		/* Check outstanding IO count */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support) {
				tgtp = phba->targetport->private;
				/* Calculate outstanding IOs */
				tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
				tot += atomic_read(&tgtp->xmt_fcp_release);
				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
			} else {
				localport = phba->pport->localport;
				if (!localport || !localport->private)
					goto skip_eqdelay;
				lport = (struct lpfc_nvme_lport *)
					localport->private;
				tot = 0;
				for (i = 0;
					i < phba->cfg_nvme_io_channel; i++) {
					cstat = &lport->cstat[i];
					data1 = atomic_read(
						&cstat->fc4NvmeInputRequests);
					data2 = atomic_read(
						&cstat->fc4NvmeOutputRequests);
					data3 = atomic_read(
						&cstat->fc4NvmeControlRequests);
					tot += (data1 + data2 + data3);
					tot -= atomic_read(
						&cstat->fc4NvmeIoCmpls);
				}
			}
		}

		/* Interrupts per sec per EQ */
		val = phba->cfg_fcp_imax / phba->io_channel_irqs;
		tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */

		/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
		max_cqe = time_elapsed * tick_cqe;

		for (i = 0; i < phba->io_channel_irqs; i++) {
			/* Fast-path EQ */
			qp = phba->sli4_hba.hba_eq[i];
			if (!qp)
				continue;

			/* Use no EQ delay if we don't have many outstanding
			 * IOs, or if we are only processing 1 CQE/ISR or less.
			 * Otherwise, assume we can process up to lpfc_fcp_imax
			 * interrupts per HBA.
			 */
			if (tot < LPFC_NODELAY_MAX_IO ||
			    qp->EQ_cqe_cnt <= max_cqe)
				val = 0;
			else
				val = phba->cfg_fcp_imax;

			if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
				/* Use EQ Delay Register method */

				/* Convert for EQ Delay register */
				if (val) {
					/* First, interrupts per sec per EQ */
					val = phba->cfg_fcp_imax /
						phba->io_channel_irqs;

					/* us delay between each interrupt */
					val = LPFC_SEC_TO_USEC / val;
				}
				if (val != qp->q_mode) {
					reg_data.word0 = 0;
					bf_set(lpfc_sliport_eqdelay_id,
					       &reg_data, qp->queue_id);
					bf_set(lpfc_sliport_eqdelay_delay,
					       &reg_data, val);
					writel(reg_data.word0, eqdreg);
				}
			} else {
				/* Use mbox command method */
				if (val != qp->q_mode)
					lpfc_modify_hba_eq_delay(phba, i,
								 1, val);
			}

			/*
			 * val is cfg_fcp_imax or 0 for mbox delay or us delay
			 * between interrupts for EQDR.
			 */
			qp->q_mode = val;
			qp->EQ_cqe_cnt = 0;
		}
	}

skip_eqdelay:
	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			jiffies +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}
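
/*
 * Worked example (illustration only; the numbers are hypothetical): with
 * cfg_fcp_imax = 150000 interrupts/sec and io_channel_irqs = 4, each EQ is
 * budgeted 37500 interrupts/sec, i.e. 37500 / CONFIG_HZ CQEs per tick.
 * When coalescing kicks in on the EQ Delay Register path, the programmed
 * value becomes LPFC_SEC_TO_USEC / 37500, roughly 26 us between
 * interrupts; a value of 0 disables the delay for lightly loaded queues.
 */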
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That could cause the I/Os
	 * dropped by the firmware. Error the iocbs (I/O) on the txcmplq and
	 * let the SCSI layer retry them after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error the iocbs (I/O) on the txcmplq and let the SCSI
		 * layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: flag for emitting the "Reset Needed" recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for the port
 * status register (ERR, RDY, RN) bits before proceeding with the function
 * reset. During this process, the interrupt vectors are freed and later
 * requested again to handle a possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On an error status condition, the driver needs to wait
		 * for port readiness before performing the reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
	lpfc_offline_prep(phba, mbx_action);
	lpfc_offline(phba);

	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);

	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6309 Failed to restart board\n");
		return rc;
	}

	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;

	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
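
/*
 * Usage sketch (illustrative, not driver code): this is how the SLI4
 * error attention handler below drives the reset, forcing the port
 * offline only when recovery fails:
 *
 *	rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, en_rn_msg);
 *	if (rc)
 *		lpfc_sli4_offline_eratt(phba);
 */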
/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7623 Checking UE recoverable");
		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;
			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1 second before checking the semaphore */
			msleep(1000);
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);
		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
						&portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
					    &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba,
						KERN_ERR, LOG_INIT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			   reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through when unable to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *)&event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine, invoking it through the API jump table function pointer in the
 * lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
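
/*
 * Illustrative sketch (the assignments below are assumed wiring, done
 * elsewhere in the driver at device-setup time, not in this function):
 *
 *	phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;	  (SLI3 devices)
 *	phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;	  (SLI4 devices)
 *
 * With the pointer installed once, the wrapper dispatches without any
 * SLI revision checks on the error-handling path.
 */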
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t)vpd[0], (uint32_t)vpd[1], (uint32_t)vpd[2],
			(uint32_t)vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') &&
				    (vpd[index + 1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3 + i);
					while (i--) {
						phba->SerialNumber[j++] =
						    vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index + 1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3 + i);
					while (i--) {
						phba->ModelDesc[j++] =
						    vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index + 1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3 + i);
					while (i--) {
						phba->ModelName[j++] =
						    vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index + 1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3 + i);
					while (i--) {
						phba->ProgramType[j++] =
						    vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index + 1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3 + i);
					while (i--) {
						if ((phba->sli_rev ==
						     LPFC_SLI_REV4) &&
						    (phba->sli4_hba.pport_name_sta ==
						     LPFC_SLI4_PPNAME_GET)) {
							j++;
							index++;
						} else
							phba->Port[j++] =
							    vpd[index++];
						if (j == 19)
							break;
					}
					if ((phba->sli_rev != LPFC_SLI_REV4) ||
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_NON))
						phba->Port[j] = 0;
					continue;
				} else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}
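
/*
 * Worked example (hypothetical bytes): a minimal VPD image the parser
 * above accepts. Tag 0x90 opens the read-only area; each keyword is two
 * ASCII characters, a one-byte length, then the data; tag 0x78 ends the
 * VPD. Lengths are little-endian, matching the (lenhi << 8) + lenlo
 * computation above.
 *
 *	0x90, 0x0B, 0x00,		read-only area, 11 bytes follow
 *	'S', 'N', 0x03, '1', '2', '3',	SerialNumber = "123"
 *	'V', '2', 0x02, 'L', 'P',	ModelName = "LP" (VPD_MODEL_NAME set)
 *	0x78				end tag
 */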
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of 256
 * chars. It shall be returned with the model name, maximum speed, and the
 * host bus type. The @mdp passed into this function points to an array of
 * 80 chars. When the function returns, the @mdp will be filled with the
 * model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe",
				"Obsolete, Unsupported FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/*
	 * OneConnect HBAs require special processing; they are all
	 * initiators and we put the port number on the end.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				 "Emulex OneConnect %s, %s Initiator %s",
				 m.name, m.function,
				 phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				 "Emulex %s %s %s",
				 m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				 "Emulex %s %d%s %s %s",
				 m.name, max_speed, (GE) ? "GE" : "Gb",
				 m.bus, m.function);
	}
}
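
/*
 * Example output (illustrative): for a Lancer G6 device
 * (PCI_DEVICE_ID_LANCER_G6_FC) with LMT_32Gb set in phba->lmt, the
 * routine above produces
 *
 *	mdp:   "LPe32000"
 *	descp: "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter"
 *
 * whereas a OneConnect part takes the alternate format, e.g.
 * "Emulex OneConnect OCe14000, FCoE Initiator <Port>".
 */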
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
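
/*
 * Worked example: each QUE_RING_BUF64_CN iocb built above carries up to
 * two buffer descriptors, so posting cnt = 64 buffers consumes 32 iocbs;
 * an odd cnt ends with a final iocb whose ulpBdeCount is 1. A caller can
 * derive how many buffers actually landed on the ring from the return
 * value (the count NOT posted):
 *
 *	posted = cnt - lpfc_post_buffer(phba, pring, cnt);
 */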
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}
#define S(N, V) (((V) << (N)) | ((V) >> (32 - (N))))
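
/*
 * S(N, V) is a 32-bit rotate-left of V by N bits, the SHA-1 circular
 * shift: S(1, 0x80000000) == 0x00000001. It is used by the hash helpers
 * below.
 */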
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t *HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table and returned through @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;

	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}
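
/*
 * Usage sketch (illustrative): the two routines above perform one SHA-1
 * compression over a pre-filled 16-word block, exactly as lpfc_hba_init()
 * below drives them:
 *
 *	uint32_t digest[5], work[80];
 *
 *	(fill work[0..15] with the message block)
 *	lpfc_sha_init(digest);		load the H0..H4 constants
 *	lpfc_sha_iterate(digest, work);	expand to 80 words and compress
 */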
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *)phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}
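
/*
 * Layout sketch (an illustrative reading of the code above): words 0-1
 * and 78-79 of HashWorking receive the two WWNN words, words 0-6 are then
 * XORed with the 7-word RandomData challenge, and the rest stays zero
 * from kcalloc():
 *
 *	HashWorking[0]    = wwnn[0] ^ RandomData[0]
 *	HashWorking[1]    = wwnn[1] ^ RandomData[1]
 *	HashWorking[2..6] = RandomData[2..6]
 *	HashWorking[78]   = wwnn[0], HashWorking[79] = wwnn[1]
 */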
/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* Take care of nodes in unused state before the state
		 * machine takes action.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						 LOG_NODE,
						 "0282 did:x%x ndlp:x%p "
						 "usgmap:x%x refcnt:%d\n",
						 ndlp->nlp_DID, (void *)ndlp,
						 ndlp->nlp_usg_map,
						 kref_read(&ndlp->kref));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks, with the host lock held, whether the FCF rediscovery wait timer
 * is pending before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine marks an HBA's management interface as blocked. Once the
 * HBA's management interface is marked as blocked, all user space access to
 * the HBA, whether from the sysfs interface or the libdfc interface, is
 * blocked. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}
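
/*
 * Usage sketch (illustrative): callers bracket an online/offline
 * transition with a block/unblock pair, as lpfc_online() below does:
 *
 *	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *	(reinitialize or tear down the HBA)
 *	lpfc_unblock_mgmt_io(phba);
 */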
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary rpi assignments.
 **/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i, rpi;
	unsigned long flags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp,
					 &vports[i]->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			rpi = lpfc_sli4_alloc_rpi(phba);
			if (rpi == LPFC_RPI_ALLOC_ERROR) {
				spin_lock_irqsave(&phba->ndlp_lock, flags);
				NLP_CLR_NODE_ACT(ndlp);
				spin_unlock_irqrestore(&phba->ndlp_lock, flags);
				continue;
			}
			ndlp->nlp_rpi = rpi;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
					 "0009 rpi:%x DID:%x "
					 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
					 ndlp->nlp_DID, ndlp->nlp_flag,
					 ndlp->nlp_usg_map, ndlp);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings an HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i, error = 0;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);

		/* Reestablish the local initiator port.
		 * The offline process destroyed the previous lport.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
		    !phba->nvmet_support) {
			error = lpfc_nvme_create_localport(phba->pport);
			if (error)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6132 NVME restore reg failed "
					"on nvmei error x%x\n", error);
		}
	} else {
		lpfc_sli_queue_init(phba);
		if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
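
/*
 * Illustrative restart cycle (as the SLI3 error attention handler
 * earlier in this file performs it): a full adapter bounce pairs the
 * offline half with a board restart and this routine:
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
 *	lpfc_offline(phba);
 *	lpfc_sli_brdrestart(phba);
 *	lpfc_online(phba);
 *	lpfc_unblock_mgmt_io(phba);
 */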
/**
 * lpfc_unblock_mgmt_io - Mark an HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks an HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space access
 * to the HBA, whether from the sysfs interface or the libdfc interface, is
 * allowed. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline and then set to unblock
 * the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_offline_prep - Prepare an HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare an HBA to be brought offline. It
 * performs unregistration login to all the nodes on all vports and flushes
 * the mailbox queue to make it ready to be brought offline.
 **/
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			shost = lpfc_shost_from_vport(vports[i]);
			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (!NLP_CHK_NODE_ACT(ndlp))
					continue;
				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
					continue;
				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RM);
				}
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(shost->host_lock);
				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4) {
					lpfc_printf_vlog(ndlp->vport,
							 KERN_INFO, LOG_NODE,
							 "0011 lpfc_offline: "
							 "ndlp:x%p did %x "
							 "usgmap:x%x rpi:%x\n",
							 ndlp, ndlp->nlp_DID,
							 ndlp->nlp_usg_map,
							 ndlp->nlp_rpi);
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
				}
				lpfc_unreg_rpi(vports[i], ndlp);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
/**
 * lpfc_offline - Bring an HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings an HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);

	/* Tear down the local and target port registrations. The
	 * nvme transports need to cleanup.
	 */
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(phba->pport);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the NVME buffers and IOCBs from the driver
 * list back to kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
static void
lpfc_nvme_free(struct lpfc_hba *phba)
{
	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_nvme_bufs maintained by this host. */
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &phba->lpfc_nvme_buf_list_put, list) {
		list_del(&lpfc_ncmd->list);
		phba->put_nvme_bufs--;
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
			      lpfc_ncmd->dma_handle);
		kfree(lpfc_ncmd);
		phba->total_nvme_bufs--;
	}
	spin_unlock(&phba->nvme_buf_list_put_lock);

	spin_lock(&phba->nvme_buf_list_get_lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &phba->lpfc_nvme_buf_list_get, list) {
		list_del(&lpfc_ncmd->list);
		phba->get_nvme_bufs--;
		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
			      lpfc_ncmd->dma_handle);
		kfree(lpfc_ncmd);
		phba->total_nvme_bufs--;
	}
	spin_unlock(&phba->nvme_buf_list_get_lock);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the current size of the ELS xri-sgl list,
 * then walks all the sgls and updates the physical XRI assigned to each,
 * since the XRI assignments change after a port function reset. During
 * port initialization the current ELS sgl count is 0.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate an sgl entry, buffer, or XRI
 **/
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	LIST_HEAD(els_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
				 &els_sgl_list);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				__lpfc_mbuf_free(phba, sglq_entry->virt,
						 sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&els_sgl_list,
				 &phba->sli4_hba.lpfc_els_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	return rc;
}
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the current size of the NVMET xri-sgl
 * list, then walks all the sgls and updates the physical XRI assigned to
 * each, since the XRI assignments change after a port function reset.
 * During port initialization the current NVMET sgl count is 0.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate an sgl entry, buffer, or XRI
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;

	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* allocate the additional nvmet sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* release extra nvmet sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
						    sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6307 Failed to allocate xri for "
					"NVMET sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_nvmet_sgl_list(phba);
	return rc;
}
/**
 * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the current size of the allocated SCSI
 * xri-sgl list, then walks all the sgls and updates the physical XRI
 * assigned to each, since the XRI assignments change after a port
 * function reset. During port initialization the allocated SCSI sgl
 * count is 0.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate an XRI
 **/
int
lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
	LIST_HEAD(scsi_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	phba->total_scsi_bufs = 0;

	/*
	 * update on pci function's allocated scsi xri-sgl list
	 */
	/* maximum number of xris available for scsi buffers */
	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
				      els_xri_cnt;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return 0;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->sli4_hba.scsi_xri_max =  /* Split them up */
			(phba->sli4_hba.scsi_xri_max *
			 phba->cfg_xri_split) / 100;

	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6060 Current allocated SCSI xri-sgl count:%d, "
			"maximum SCSI xri count:%d (split:%d)\n",
			phba->sli4_hba.scsi_xri_cnt,
			phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);

	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
		/* max scsi xri shrunk below the allocated scsi buffers */
		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
					phba->sli4_hba.scsi_xri_max;
		/* release the extra allocated scsi buffers */
		for (i = 0; i < scsi_xri_cnt; i++) {
			list_remove_head(&scsi_sgl_list, psb,
					 struct lpfc_scsi_buf, list);
			if (psb) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      psb->data, psb->dma_handle);
				kfree(psb);
			}
		}
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}

	/* update xris associated to remaining allocated scsi buffers */
	psb = NULL;
	psb_next = NULL;
	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2560 Failed to allocate xri for "
					"scsi buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	return 0;

out_free_mem:
	lpfc_scsi_free(phba);
	return rc;
}
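/*
 * The update loop above translates a driver-local XRI index (lxri) into
 * the physical XRI the port assigned after the function reset. A sketch
 * of the relationship, assuming the port hands out one contiguous XRI
 * block starting at xri_base (the xri_ids[] indirection is what the
 * driver actually relies on; a port is free to hand out any mapping):
 *
 *	lxri = lpfc_sli4_next_xritag(phba);	// 0, 1, 2, ...
 *	physical_xri = phba->sli4_hba.xri_ids[lxri];
 *	// with a contiguous block: xri_ids[lxri] == xri_base + lxri
 */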
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
					      GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* First get WWN of HBA instance */
	lpfc_read_nv(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6019 Mailbox failed, mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t)-1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn is WWPN of HBA instance */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}
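/*
 * Byte-order note on the two return paths above: the READ_NV payload is
 * big-endian on SLI4, so be64_to_cpu() yields the canonical WWPN. On
 * SLI3 parts the two 32-bit words of the name arrive swapped, and
 * rotating the value by 32 bits swaps them back. Illustrative value
 * (hypothetical WWPN, not from any real HBA):
 *
 *	u64 raw = 0x123456789abcdef0ULL;
 *	rol64(raw, 32) == 0x9abcdef012345678ULL;  // 32-bit halves swapped
 */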
/**
 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the current size of the allocated NVME
 * xri-sgl list, then walks all the sgls and updates the physical XRI
 * assigned to each, since the XRI assignments change after a port
 * function reset. During port initialization the allocated NVME sgl
 * count is 0.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate an XRI
 **/
int
lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t nvme_xri_cnt, nvme_xri_max;
	LIST_HEAD(nvme_sgl_list);
	int rc, cnt;

	phba->total_nvme_bufs = 0;
	phba->get_nvme_bufs = 0;
	phba->put_nvme_bufs = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return 0;
	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */

	/* maximum number of xris available for nvme buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.nvme_xri_max = nvme_xri_max;
	phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated NVME xri-sgl count:%d, "
			"maximum NVME xri count:%d\n",
			phba->sli4_hba.nvme_xri_cnt,
			phba->sli4_hba.nvme_xri_max);

	spin_lock_irq(&phba->nvme_buf_list_get_lock);
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
	list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
	cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
	phba->get_nvme_bufs = 0;
	phba->put_nvme_bufs = 0;
	spin_unlock(&phba->nvme_buf_list_put_lock);
	spin_unlock_irq(&phba->nvme_buf_list_get_lock);

	if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
		/* max nvme xri shrunk below the allocated nvme buffers */
		spin_lock_irq(&phba->nvme_buf_list_get_lock);
		nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
					phba->sli4_hba.nvme_xri_max;
		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
		/* release the extra allocated nvme buffers */
		for (i = 0; i < nvme_xri_cnt; i++) {
			list_remove_head(&nvme_sgl_list, lpfc_ncmd,
					 struct lpfc_nvme_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		spin_lock_irq(&phba->nvme_buf_list_get_lock);
		phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
	}

	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &nvme_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	spin_lock_irq(&phba->nvme_buf_list_get_lock);
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
	phba->get_nvme_bufs = cnt;
	INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
	spin_unlock(&phba->nvme_buf_list_put_lock);
	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
	return 0;

out_free_mem:
	lpfc_nvme_free(phba);
	return rc;
}
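/*
 * Worked example of the XRI split applied by the SCSI and NVME sgl
 * update routines above (hypothetical numbers): with max_xri = 2048,
 * els_xri_cnt = 256 and cfg_xri_split = 50, lpfc_sli4_scsi_sgl_update()
 * computes
 *
 *	scsi_xri_max = ((2048 - 256) * 50) / 100 = 896
 *
 * and this routine then takes the remainder for NVME:
 *
 *	nvme_xri_max = (2048 - 256) - 896 = 896
 */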
/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates an FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates it with the FC port before adding the shost to the SCSI
 * layer.
 *
 * Return codes
 *   vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost = NULL;
	int error = 0;
	int i;
	uint64_t wwn;
	bool use_no_reset_hba = false;
	int rc;

	if (lpfc_no_hba_reset_cnt) {
		if (phba->sli_rev < LPFC_SLI_REV4 &&
		    dev == &phba->pcidev->dev) {
			/* Reset the port first */
			lpfc_sli_brdrestart(phba);
			rc = lpfc_sli_chipset_init(phba);
			if (rc)
				return NULL;
		}
		wwn = lpfc_get_wwpn(phba);
	}

	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
		if (wwn == lpfc_no_hba_reset[i]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6020 Setting use_no_reset port=%llx\n",
					wwn);
			use_no_reset_hba = true;
			break;
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		if (dev != &phba->pcidev->dev) {
			shost = scsi_host_alloc(&lpfc_vport_template,
						sizeof(struct lpfc_vport));
		} else {
			if (!use_no_reset_hba)
				shost = scsi_host_alloc(&lpfc_template,
						sizeof(struct lpfc_vport));
			else
				shost = scsi_host_alloc(&lpfc_template_no_hr,
						sizeof(struct lpfc_vport));
		}
	} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		shost = scsi_host_alloc(&lpfc_template_nvme,
					sizeof(struct lpfc_vport));
	}
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;
	lpfc_get_vport_cfgparam(vport);

	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	shost->nr_hw_queues = phba->cfg_fcp_io_channel;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);

	timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);

	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}
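/*
 * Typical call pattern for the routine above when bringing up the
 * physical port during PCI probe (condensed, illustrative sketch;
 * error handling and the surrounding probe context are elided):
 *
 *	vport = lpfc_create_port(phba, lpfc_get_instance(),
 *				 &phba->pcidev->dev);
 *	if (!vport)
 *		return -ENODEV;
 *	shost = lpfc_shost_from_vport(vport);
 *
 * Passing the PCI device as @dev marks this as the physical port; NPIV
 * vports pass their own device instead and get the vport template.
 */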
/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys an FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}

/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from the lpfc_hba_index pool.
 * It uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}
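/*
 * The idr_alloc() above is paired with an idr_remove() when the hba
 * instance goes away. A minimal sketch of the lifetime (the release
 * site shown here is illustrative; the driver performs the removal
 * during PCI remove):
 *
 *	int inst = lpfc_get_instance();
 *	if (inst == -1)
 *		return -ENOMEM;
 *	...
 *	idr_remove(&lpfc_hba_index, inst);	// on teardown
 */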
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the host scan is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
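/*
 * The SCSI layer polls this callback with the elapsed scan time, so the
 * checks above amount to the following decision table (thresholds taken
 * from the code; the polling loop itself lives in the midlayer):
 *
 *	>= 30000 ms				-> stop waiting, report done
 *	>= 15000 ms with link still down	-> report done
 *	<   2000 ms with no mapped nodes	-> keep waiting for discovery
 *	otherwise				-> done once the vport is
 *						   ready and discovery is idle
 */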
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host's attributes on an FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				      sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_64Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
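/*
 * Worked example of the max-frame computation above (hypothetical
 * service-parameter bytes): with bbRcvSizeMsb = 0x08 and
 * bbRcvSizeLsb = 0x00,
 *
 *	((0x08 & 0x0F) << 8) | 0x00 = 0x800 = 2048 bytes
 *
 * i.e. the two service-parameter bytes combine into the 12-bit
 * buffer-to-buffer receive data field size.
 */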
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
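/*
 * The indirect call above goes through the SLI API jump table filled in
 * at attach time. A condensed sketch of the dispatch, assuming the
 * usual rev-based table setup (the real assignment lives in the
 * driver's API table init; shown here only to illustrate the pattern):
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:				// SLI3 parts
 *		phba->lpfc_stop_port = lpfc_stop_port_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:				// SLI4 parts
 *		phba->lpfc_stop_port = lpfc_stop_port_s4;
 *		break;
 *	}
 */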
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: pointer to the timer_list embedded in the lpfc_hba FCF structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery has
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and the worker thread is then woken up to process it from the
 * worker thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
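/*
 * The from_timer() call above recovers the hba from the timer_list that
 * was registered with this handler. A minimal sketch of the pairing,
 * assuming the usual setup done during hba resource init:
 *
 *	timer_setup(&phba->fcf.redisc_wait,
 *		    lpfc_sli4_fcf_redisc_wait_tmo, 0);
 *	...
 *	// in the handler:
 *	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
 */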
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Unknown link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		break;
	}
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}
/**
 * lpfc_sli_port_speed_get - Convert link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}
/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		case LPFC_FC_LA_SPEED_64G:
			port_speed = 64000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have finished processing this link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have finished processing this link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Initialize completion status */
		mb = &pmb->u.mb;
		mb->mbxStatus = MBX_SUCCESS;

		/* Parse port fault information field */
		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host  *shost;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			evt_type);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}
		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				evt_type);
		break;
	}
}
/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
		(phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
		&& (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
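/*
 * The loop above is the driver's standard vport-iteration idiom: the
 * work array pins every active vport for the duration of the walk so
 * entries cannot go away mid-iteration. A sketch of the pattern for any
 * per-vport action ("do_something" is a hypothetical placeholder):
 *
 *	vports = lpfc_create_vport_work_array(phba);
 *	if (vports)
 *		for (i = 0; i <= phba->max_vports && vports[i]; i++)
 *			do_something(vports[i]);
 *	lpfc_destroy_vport_work_array(phba, vports);
 */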
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index,
				acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
			     i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change.  The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}
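
/*
 * Worked example (value assumed for illustration only): an ACQE whose
 * lpfc_acqe_grp5_llink_spd field reads 100 yields a logical speed of
 * 100 * 10 = 1000 Mbps above, i.e. a 1 Gbps logical link.
 */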
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
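
/*
 * Illustrative note (not driver code): the drain loop above takes the
 * hba lock only around the dequeue and drops it before dispatching,
 * since handlers such as lpfc_sli4_async_fip_evt() acquire hbalock
 * themselves; holding it across the switch would self-deadlock.
 */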
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}
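
/*
 * Illustrative usage (sketch only, values assumed): a probe path that
 * has identified an SLI-3 adapter would wire up the jump tables with
 *
 *	if (lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP))
 *		return -ENODEV;
 *
 * after which revision-specific entry points are reached indirectly,
 * e.g. phba->lpfc_handle_eratt(phba), without the caller knowing
 * whether the SLI-3 or SLI-4 variant is bound.
 */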
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function and returns the total number of virtual functions it supports.
 * If the device has no SR-IOV capability, 0 is returned.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}
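
/*
 * Usage note (counts here are illustrative): lpfc_sli_probe_sriov_nr_virtfn()
 * below uses this value as the upper bound when validating the requested
 * VF count, e.g. asking for 8 VFs on a function whose TotalVFs field
 * reads 4 fails with -EINVAL before pci_enable_sriov() is attempted.
 */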
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable that number of virtual functions on the physical function. As
 * not all devices support SR-IOV, the return code from the
 * pci_enable_sriov() API call is not considered an error condition for
 * most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		/* Initialize the scsi buffer list used by driver for scsi IO */
		spin_lock_init(&phba->scsi_buf_list_get_lock);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
		spin_lock_init(&phba->scsi_buf_list_put_lock);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	}

	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
		(phba->nvmet_support == 0)) {
		/* Initialize the NVME buffer list used by driver for NVME IO */
		spin_lock_init(&phba->nvme_buf_list_get_lock);
		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
		phba->get_nvme_bufs = 0;
		spin_lock_init(&phba->nvme_buf_list_put_lock);
		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
		phba->put_nvme_bufs = 0;
	}

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	return 0;
}
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates with the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);
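
	/*
	 * Worked example (sizes assumed for illustration only): with
	 * cfg_sg_seg_cnt = 64, a 32-byte fcp_cmnd, a 96-byte fcp_rsp and
	 * 12-byte BDEs, the non-DIF branch above yields
	 *
	 *	sg_dma_buf_size = 32 + 96 + (64 + 2) * 12 = 920 bytes
	 *
	 * per scsi_buf, where the "+ 2" covers the two reserved BDEs for
	 * the FCP cmnd and FCP rsp themselves.
	 */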
	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;
	int rc, i, max_buf_size;
	int longs;
	int fof_vectors = 0;
	int extra;
	uint64_t wwn;
	u32 if_type;
	u32 if_fam;

	phba->sli4_hba.num_online_cpu = num_online_cpus();
	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */
	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);

	/* FCF rediscover timer */
	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each EQ/CQ/WQ tuple.
	 * The WQ create will allocate the ring.
	 */

	/*
	 * 1 for cmd, 1 for rsp, NVME adds an extra one
	 * for boundary conditions in its max_sgl_segment template.
	 */
	extra = 2;
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		extra++;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;

	/*
	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be calculated.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt
		 * to minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt =
				LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) +
				((phba->cfg_sg_seg_cnt + extra) *
				sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;

		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
		 * need to post 1 page for the SGL.
		 */
	}

	/* Initialize the host templates with the updated values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;

	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);
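
	/*
	 * Worked example (sizes assumed for illustration only): with NVME
	 * enabled, extra = 3, so for cfg_sg_seg_cnt = 128 and 16-byte
	 * sli4_sge entries the non-DIF branch computes
	 *
	 *	sg_dma_buf_size = sizeof(fcp_cmnd) + sizeof(fcp_rsp)
	 *			  + (128 + 3) * 16
	 *
	 * which is then rounded up to a SLI4_PAGE_SIZE multiple above,
	 * since the pool backing these buffers is page aligned.
	 */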
	/* Initialize buffer queue management fields */
	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		/* Initialize the Abort scsi buffer list used by driver */
		spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Initialize the Abort nvme buffer list used by driver */
		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
	}

	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize mboxq lists. If the early init routines fail
	 * these lists need to be correctly initialized.
	 */
	INIT_LIST_HEAD(&phba->sli.mboxq);
	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

	/* initialize optic_state to 0xFF */
	phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc)) {
			rc = -ENODEV;
			goto out_free_mem;
		}
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Check for NVMET being configured */
	phba->nvmet_support = 0;
	if (lpfc_enable_nvmet_cnt) {
		/* First get WWN of HBA instance */
		lpfc_read_nv(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6016 Mailbox failed, mbxCmd x%x "
					"READ_NV, mbxStatus x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
		mb = &mboxq->u.mb;
		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(uint64_t));
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwnn.u.name = wwn;
		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
		       sizeof(uint64_t));
		/* wwn is WWPN of HBA instance */
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwpn.u.name = wwn;

		/* Check to see if it matches any module parameter */
		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
			if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
				if (lpfc_nvmet_mem_alloc(phba))
					break;

				phba->nvmet_support = 1; /* a match */

				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6017 NVME Target %016llx\n",
						wwn);
#else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6021 Can't enable NVME Target."
						" NVME_TARGET_FC infrastructure"
						" is not in kernel\n");
#endif
				break;
			}
		}
	}

	lpfc_nvme_mod_param_dep(phba);

	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if_fam = bf_get(lpfc_sli_intf_sli_family,
				&phba->sli4_hba.sli_intf);
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2999 Unsupported SLI4 Parameters "
					"Extents and RPI headers enabled.\n");
			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
			    if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
				mempool_free(mboxq, phba->mbox_mem_pool);
				rc = -EIO;
				goto out_free_bsmbx;
			}
		}
		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);
	if (phba->cfg_fof)
		fof_vectors = 1;

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
					 sizeof(struct lpfc_vector_map_info),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}
	if (lpfc_used_cpu == NULL) {
		lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
					GFP_KERNEL);
		if (!lpfc_used_cpu) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3335 Failed allocate memory for msi-x "
					"interrupt vector mapping\n");
			kfree(phba->sli4_hba.cpu_map);
			rc = -ENOMEM;
			goto out_free_hba_eq_hdl;
		}
		for (i = 0; i < lpfc_present_cpu; i++)
			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
	}

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

out_free_hba_eq_hdl:
	kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}
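
/*
 * Note on the error path above (descriptive only, no behavior change
 * implied): the out_* labels unwind in exactly the reverse order of
 * the allocations, so a failure at any step frees only what was
 * already set up. New allocations added to this routine should extend
 * both the success path and this label chain in matching reverse
 * order.
 */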
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.num_online_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	/* The lpfc_wq workqueue for deferred irq use is only used for SLI4 */
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
	else
		phba->wq = NULL;

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	if (phba->worker_thread)
		kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCBs to allocate.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
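
/*
 * Usage sketch (illustrative, count assumed): a probe path would
 * typically pair the two routines above as
 *
 *	if (lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT))
 *		goto out_fail;
 *	...
 *	lpfc_free_iocb_list(phba);	// on teardown or error
 *
 * The partial-allocation case is already handled: on failure,
 * lpfc_init_iocb_list() frees whatever it managed to allocate before
 * returning -ENOMEM.
 */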
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}

/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}

/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}

/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}
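
/*
 * Worked example (values assumed for illustration): on a 64-bit
 * kernel a struct lpfc_sglq pointer is 8 bytes, so a port reporting
 * max_xri = 2048 allocates 2048 * 8 = 16 KB here - one slot per
 * possible XRI, holding the sglq_entry for whichever IO currently
 * owns that XRI.
 */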
  6017. /**
  6018. * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
  6019. * @phba: pointer to lpfc hba data structure.
  6020. *
  6021. * This routine is invoked to walk through the array of active sglq entries
  6022. * and free all of the resources.
  6023. * This is just a place holder for now.
  6024. **/
  6025. static void
  6026. lpfc_free_active_sgl(struct lpfc_hba *phba)
  6027. {
  6028. kfree(phba->sli4_hba.lpfc_sglq_active_list);
  6029. }
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 *
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* scsi xri-buffer book keeping */
	phba->sli4_hba.scsi_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.nvme_xri_cnt = 0;
}
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * 64 rpi context headers. This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba. This single region
 * provides support for up to 64 rpis. The region is used globally
 * by the device.
 *
 * Returns:
 *	A valid rpi hdr on success.
 *	A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required. Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block. The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port. The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
					   LPFC_HDR_TEMPLATE_SIZE,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;

	spin_lock_irq(&phba->hbalock);
	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
	spin_unlock_irq(&phba->hbalock);

	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
	return;
}
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	atomic_set(&phba->fc4ScsiInputRequests, 0);
	atomic_set(&phba->fc4ScsiOutputRequests, 0);
	atomic_set(&phba->fc4ScsiControlRequests, 0);
	atomic_set(&phba->fc4ScsiIoCmpls, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
		if (phba->txrdy_payload_pool == NULL) {
			phba->txrdy_payload_pool = dma_pool_create(
				"txrdy_pool", &phba->pcidev->dev,
				TXRDY_PAYLOAD_LEN, 16, 0);
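			/*
			 * dma_pool_create() args above: blocks of
			 * TXRDY_PAYLOAD_LEN bytes, 16-byte aligned, with no
			 * boundary crossing restriction (0).
			 */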
			if (phba->txrdy_payload_pool) {
				phba->targetport = NULL;
				phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
				lpfc_printf_log(phba, KERN_INFO,
						LOG_INIT | LOG_NVME_DISC,
						"6076 NVME Target Found\n");
			}
		}
	}
	lpfc_debugfs_initialize(vport);

	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}
/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}
/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;
	int pagecnt = 10;

	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = phba->cfg_prot_mask;
		old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
					SHOST_DIX_TYPE0_PROTECTION |
					SHOST_DIX_TYPE1_PROTECTION);
		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
					 SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
			if ((old_mask != phba->cfg_prot_mask) ||
			    (old_guard != phba->cfg_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					phba->cfg_prot_mask,
					phba->cfg_prot_guard);

			scsi_host_set_prot(shost, phba->cfg_prot_mask);
			scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}

	if (!_dump_buf_data) {
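		/*
		 * Try progressively smaller buddy allocations, starting at
		 * order 10 (1 << 10 pages), until one succeeds.
		 */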
		while (pagecnt) {
			spin_lock_init(&_dump_buf_lock);
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9043 BLKGRD: allocated %d pages for "
					"_dump_buf_data at 0x%p\n",
					(1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9044 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
			"\n", _dump_buf_data);
	if (!_dump_buf_dif) {
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9046 BLKGRD: allocated %d pages for "
					"_dump_buf_dif at 0x%p\n",
					(1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9047 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
			_dump_buf_dif);
}
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);
	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
						&phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
			  offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
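	/*
	 * Carve the single hbqslimp allocation into per-HBQ regions, each
	 * sized by that HBQ's entry count.
	 */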
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr_reg.word0) ||
		    (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid. Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process. Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks. The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				       &reg_data.word0) ||
			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
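		/* if_type 2 has a single combined EQ/CQ doorbell register */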
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}
/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;
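	/* Each virtual function owns a dedicated doorbell page, so every
	 * register offset below is biased by vf * LPFC_VFR_PAGE_SIZE.
	 */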
	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE +
				      LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE +
				      LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE +
				      LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE +
				      LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				      vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
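	/*
	 * Each 32-bit word written below carries a 30-bit address chunk in
	 * bits 31:2, with the hi/lo marker in the low-order bit positions.
	 */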
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands are recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union  lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Limit the max we support */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq);

		/*
		 * Calculate NVME queue resources based on how
		 * many WQ/CQs are available.
		 */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			length = phba->sli4_hba.max_cfg_param.max_wq;
			if (phba->sli4_hba.max_cfg_param.max_cq <
			    phba->sli4_hba.max_cfg_param.max_wq)
				length = phba->sli4_hba.max_cfg_param.max_cq;
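			/*
			 * length is now min(max_wq, max_cq): each NVME IO
			 * channel needs one WQ and one CQ, so the smaller
			 * count is the binding limit.
			 */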
			/*
			 * What's left after this can go toward NVME.
			 * The minus 6 accounts for ELS, NVME LS, MBOX,
			 * FOF plus a couple extra. When configured for
			 * NVMET, FCP io channel WQs are not created.
			 */
			length -= 6;
			if (!phba->nvmet_support)
				length -= phba->cfg_fcp_io_channel;

			if (phba->cfg_nvme_io_channel > length) {
				lpfc_printf_log(
					phba, KERN_ERR, LOG_SLI,
					"2005 Reducing NVME IO channel to %d: "
					"WQ %d CQ %d NVMEIO %d FCPIO %d\n",
					length,
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->cfg_nvme_io_channel,
					phba->cfg_fcp_io_channel);

				phba->cfg_nvme_io_channel = length;
			}
		}
	}

	if (rc)
		goto read_cfg_out;

	/* Update link speed if forced link speed is supported */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	length = phba->sli4_hba.max_cfg_param.max_xri -
		 lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed, mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}
/**
 * lpfc_sli4_queue_verify - Verify and update EQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs.
 * After this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	int io_channel;
	int fof_vectors = phba->cfg_fof ? 1 : 0;
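	/* Flash Optimized Fabric (fof) support, when configured, consumes
	 * one extra EQ/interrupt vector on top of the IO channels.
	 */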
	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on HBA EQ parameters */
	io_channel = phba->io_channel_irqs;

	if (phba->sli4_hba.num_online_cpu < io_channel) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"3188 Reducing IO channels to match number of "
				"online CPUs: from %d to %d\n",
				io_channel, phba->sli4_hba.num_online_cpu);
		io_channel = phba->sli4_hba.num_online_cpu;
	}

	if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2575 Reducing IO channels to match number of "
				"available EQs: from %d to %d\n",
				io_channel,
				phba->sli4_hba.max_cfg_param.max_eq);
		io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
	}

	/* The actual number of FCP / NVME event queues adopted */
	if (io_channel != phba->io_channel_irqs)
		phba->io_channel_irqs = io_channel;
	if (phba->cfg_fcp_io_channel > io_channel)
		phba->cfg_fcp_io_channel = io_channel;
	if (phba->cfg_nvme_io_channel > io_channel)
		phba->cfg_nvme_io_channel = io_channel;
	if (phba->nvmet_support) {
		if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
	}
	if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
			phba->io_channel_irqs, phba->cfg_fcp_io_channel,
			phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
	return 0;
}
static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
	struct lpfc_queue *qdesc;
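	/*
	 * The NVME CQ/WQ pair here always uses expanded (large) queue pages
	 * with the expanded entry counts; the FCP path in
	 * lpfc_alloc_fcp_wq_cq() below only does so when
	 * enab_exp_wqcq_pages is set.
	 */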
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      LPFC_CQE_EXP_COUNT);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0508 Failed allocate fast-path NVME CQ (%d)\n",
				wqidx);
		return 1;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.nvme_cq[wqidx] = qdesc;

	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
				      LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0509 Failed allocate fast-path NVME WQ (%d)\n",
				wqidx);
		return 1;
	}
	phba->sli4_hba.nvme_wq[wqidx] = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	return 0;
}
static int
lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
	struct lpfc_queue *qdesc;
	uint32_t wqesize;

	/* Create Fast Path FCP CQs */
	if (phba->enab_exp_wqcq_pages)
		/* Increase the CQ size when WQEs contain an embedded cdb */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      LPFC_CQE_EXP_COUNT);
	else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
		return 1;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.fcp_cq[wqidx] = qdesc;

	/* Create Fast Path FCP WQs */
	if (phba->enab_exp_wqcq_pages) {
		/* Increase the WQ size when WQEs contain an embedded cdb */
		wqesize = (phba->fcp_embed_io) ?
			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      wqesize,
					      LPFC_WQE_EXP_COUNT);
	} else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);

	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0503 Failed allocate fast-path FCP WQ (%d)\n",
				wqidx);
		return 1;
	}
	phba->sli4_hba.fcp_wq[wqidx] = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	return 0;
}
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx, io_channel;

	/*
	 * Create HBA Record arrays.
	 * Both NVME and FCP will share that same vectors / EQs
	 */
	io_channel = phba->io_channel_irqs;
	if (!io_channel)
		return -ERANGE;
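	/* Seed the default entry sizes and counts for each SLI4 queue type */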
  7461. phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
  7462. phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
  7463. phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
  7464. phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
  7465. phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
  7466. phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
  7467. phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
  7468. phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
  7469. phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
  7470. phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
  7471. phba->sli4_hba.hba_eq = kcalloc(io_channel,
  7472. sizeof(struct lpfc_queue *),
  7473. GFP_KERNEL);
  7474. if (!phba->sli4_hba.hba_eq) {
  7475. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7476. "2576 Failed allocate memory for "
  7477. "fast-path EQ record array\n");
  7478. goto out_error;
  7479. }
  7480. if (phba->cfg_fcp_io_channel) {
  7481. phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
  7482. sizeof(struct lpfc_queue *),
  7483. GFP_KERNEL);
  7484. if (!phba->sli4_hba.fcp_cq) {
  7485. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7486. "2577 Failed allocate memory for "
  7487. "fast-path CQ record array\n");
  7488. goto out_error;
  7489. }
  7490. phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
  7491. sizeof(struct lpfc_queue *),
  7492. GFP_KERNEL);
  7493. if (!phba->sli4_hba.fcp_wq) {
  7494. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7495. "2578 Failed allocate memory for "
  7496. "fast-path FCP WQ record array\n");
  7497. goto out_error;
  7498. }
  7499. /*
  7500. * Since the first EQ can have multiple CQs associated with it,
  7501. * this array is used to quickly see if we have a FCP fast-path
  7502. * CQ match.
  7503. */
  7504. phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
  7505. sizeof(uint16_t),
  7506. GFP_KERNEL);
  7507. if (!phba->sli4_hba.fcp_cq_map) {
  7508. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7509. "2545 Failed allocate memory for "
  7510. "fast-path CQ map\n");
  7511. goto out_error;
  7512. }
  7513. }
	if (phba->cfg_nvme_io_channel) {
		phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
						 sizeof(struct lpfc_queue *),
						 GFP_KERNEL);
		if (!phba->sli4_hba.nvme_cq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6077 Failed allocate memory for "
					"fast-path CQ record array\n");
			goto out_error;
		}
		phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
						 sizeof(struct lpfc_queue *),
						 GFP_KERNEL);
		if (!phba->sli4_hba.nvme_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2581 Failed allocate memory for "
					"fast-path NVME WQ record array\n");
			goto out_error;
		}
		/*
		 * Since the first EQ can have multiple CQs associated with it,
		 * this array is used to quickly see if we have a NVME fast-path
		 * CQ match.
		 */
		phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
						     sizeof(uint16_t),
						     GFP_KERNEL);
		if (!phba->sli4_hba.nvme_cq_map) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6078 Failed allocate memory for "
					"fast-path CQ map\n");
			goto out_error;
		}

		if (phba->nvmet_support) {
			phba->sli4_hba.nvmet_cqset = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_cqset) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3121 Fail allocate memory for "
						"fast-path CQ set array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_hdr) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3122 Fail allocate memory for "
						"fast-path RQ set hdr array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3124 Fail allocate memory for "
						"fast-path RQ set data array\n");
				goto out_error;
			}
		}
	}
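
	/*
	 * All work queues allocated below are linked onto lpfc_wq_list so
	 * the driver can walk every WQ (ELS, NVME LS, FCP and NVME
	 * fast-path) from a single list head.
	 */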
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Create HBA Event Queues (EQs) */
	for (idx = 0; idx < io_channel; idx++) {
		/* Create EQs */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate EQ (%d)\n", idx);
			goto out_error;
		}
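		/*
		 * Start the queue's valid (phase) bit as 1; presumably the
		 * hardware toggles it per pass through the queue on newer
		 * if_types, so a freshly allocated queue treats 1 as "valid".
		 */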
		qdesc->qe_valid = 1;
		phba->sli4_hba.hba_eq[idx] = qdesc;
	}

	/* FCP and NVME io channels are not required to be balanced */
	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
		if (lpfc_alloc_fcp_wq_cq(phba, idx))
			goto out_error;

	for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
		if (lpfc_alloc_nvme_wq_cq(phba, idx))
			goto out_error;

	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3142 Failed allocate NVME "
						"CQ Set (%d)\n", idx);
				goto out_error;
			}
			qdesc->qe_valid = 1;
			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
		}
	}
	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.els_cq = qdesc;

	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Create NVME LS Complete Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6079 Failed allocate NVME LS CQ\n");
			goto out_error;
		}
		qdesc->qe_valid = 1;
		phba->sli4_hba.nvmels_cq = qdesc;

		/* Create NVME LS Work Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6080 Failed allocate NVME LS WQ\n");
			goto out_error;
		}
		phba->sli4_hba.nvmels_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}
	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			/* Create NVMET Receive Queue for header */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3146 Failed allocate "
						"receive HRQ\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;

			/* Only needed for header of RQ pair */
			qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
					      GFP_KERNEL);
			if (qdesc->rqbp == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6131 Failed allocate "
						"Header RQBP\n");
				goto out_error;
			}

			/* Put list in known state in case driver load fails. */
			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* Create NVMET Receive Queue for data */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3156 Failed allocate "
						"receive DRQ\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
		}
	}
	/* Create the Queues needed for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_create(phba);
	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}
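
/*
 * Small helpers used by lpfc_sli4_queue_destroy(): free a single queue,
 * an array of queues, or a CQ map array, NULLing each pointer so that a
 * repeated destroy call is harmless.
 */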
static inline void
__lpfc_sli4_release_queue(struct lpfc_queue **qp)
{
	if (*qp != NULL) {
		lpfc_sli4_queue_free(*qp);
		*qp = NULL;
	}
}

static inline void
lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
{
	int idx;

	if (*qs == NULL)
		return;

	for (idx = 0; idx < max; idx++)
		__lpfc_sli4_release_queue(&(*qs)[idx]);

	kfree(*qs);
	*qs = NULL;
}

static inline void
lpfc_sli4_release_queue_map(uint16_t **qmap)
{
	if (*qmap != NULL) {
		kfree(*qmap);
		*qmap = NULL;
	}
}
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues created for the
 * FCoE HBA operation.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	/* Release HBA eqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);

	/* Release FCP cqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
				 phba->cfg_fcp_io_channel);

	/* Release FCP wqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
				 phba->cfg_fcp_io_channel);

	/* Release FCP CQ mapping array */
	lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);

	/* Release NVME cqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
				 phba->cfg_nvme_io_channel);

	/* Release NVME wqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
				 phba->cfg_nvme_io_channel);

	/* Release NVME CQ mapping array */
	lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
}
int
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_dmabuf *h_buf;
	struct rqb_dmabuf *rqb_buffer;

	rqbp = rq->rqbp;
	while (!list_empty(&rqbp->rqb_buffer_list)) {
		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
				 struct lpfc_dmabuf, list);
		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
		rqbp->buffer_count--;
	}
	return 1;
}
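
/*
 * lpfc_create_wq_cq - Create a WQ (or MQ) and its parent CQ on an EQ
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue for the new CQ.
 * @cq: completion queue to create.
 * @wq: work queue (or mailbox queue for LPFC_MBOX) to create on @cq.
 * @cq_map: optional slot to record the new CQ id for fast lookup.
 * @qidx: io channel index being set up.
 * @qtype: LPFC_MBOX, LPFC_ELS, LPFC_NVME_LS, LPFC_FCP or LPFC_NVME.
 */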
static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		  struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
		  int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6085 Fast-path %s (%d) not allocated\n",
				((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* Create the CQ first */
	rc = lpfc_cq_create(phba, cq, eq,
			    (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6086 Failed setup of CQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
		return rc;
	}
	cq->chann = qidx;

	if (qtype != LPFC_MBOX) {
		/* Setup the cq_map entry for fast lookup */
		if (cq_map)
			*cq_map = cq->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
			qidx, cq->queue_id, qidx, eq->queue_id);

		/* Create the WQ */
		rc = lpfc_wq_create(phba, wq, cq, qtype);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}
		wq->chann = qidx;

		/* Bind this CQ/WQ to its SLI ring */
		pring = wq->pring;
		pring->sli.sli4.wqp = (void *)wq;
		cq->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
	} else {
		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0539 Failed setup of slow-path MQ: "
					"rc = 0x%x\n", rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
				phba->sli4_hba.mbx_wq->queue_id,
				phba->sli4_hba.mbx_cq->queue_id);
	}
	return 0;
}
/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	LPFC_MBOXQ_t *mboxq;
	int qidx;
	uint32_t length, io_channel;
	int rc = -ENOMEM;

	/* Check for dual-ULP support */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3249 Unable to allocate memory for "
				"QUERY_FW_CFG mailbox command\n");
		return -ENOMEM;
	}
	length = (sizeof(struct lpfc_mbx_query_fw_config) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3250 QUERY_FW_CFG mailbox failed with status "
				"x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -ENXIO;
		goto out_error;
	}

	phba->sli4_hba.fw_func_mode =
		mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
	phba->sli4_hba.physical_port =
		mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	/*
	 * Set up HBA Event Queues (EQs)
	 */
	io_channel = phba->io_channel_irqs;

	/* Set up HBA event queue */
	if (io_channel && !phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}
	for (qidx = 0; qidx < io_channel; qidx++) {
		if (!phba->sli4_hba.hba_eq[qidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", qidx);
			rc = -ENOMEM;
			goto out_destroy;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", qidx,
					(uint32_t)rc);
			goto out_destroy;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 HBA EQ setup: queue[%d]-id=%d\n",
				qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
	}
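
	/*
	 * The WQ/CQ pairs below are spread across the HBA EQs round-robin
	 * via (qidx % io_channel); e.g. with 4 EQs and 8 NVME io channels,
	 * channels 0-7 attach to EQs 0, 1, 2, 3, 0, 1, 2, 3.
	 */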
	if (phba->cfg_nvme_io_channel) {
		if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6084 Fast-path NVME %s array not allocated\n",
				(phba->sli4_hba.nvme_cq) ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}
		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
			rc = lpfc_create_wq_cq(phba,
					phba->sli4_hba.hba_eq[qidx % io_channel],
					phba->sli4_hba.nvme_cq[qidx],
					phba->sli4_hba.nvme_wq[qidx],
					&phba->sli4_hba.nvme_cq_map[qidx],
					qidx, LPFC_NVME);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6123 Failed to setup fastpath "
					"NVME WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
				goto out_destroy;
			}
		}
	}
	if (phba->cfg_fcp_io_channel) {
		/* Set up fast-path FCP Response Complete Queue */
		if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3148 Fast-path FCP %s array not allocated\n",
				phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}
		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
			rc = lpfc_create_wq_cq(phba,
					phba->sli4_hba.hba_eq[qidx % io_channel],
					phba->sli4_hba.fcp_cq[qidx],
					phba->sli4_hba.fcp_wq[qidx],
					&phba->sli4_hba.fcp_cq_map[qidx],
					qidx, LPFC_FCP);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed to setup fastpath "
					"FCP WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
				goto out_destroy;
			}
		}
	}
	/*
	 * Set up Slow Path Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX CQ/MQ */
	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 %s not allocated\n",
				phba->sli4_hba.mbx_cq ?
				"Mailbox WQ" : "Mailbox CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
			       phba->sli4_hba.mbx_cq,
			       phba->sli4_hba.mbx_wq,
			       NULL, 0, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
			(uint32_t)rc);
		goto out_destroy;
	}
	if (phba->nvmet_support) {
		if (!phba->sli4_hba.nvmet_cqset) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3165 Fast-path NVME CQ Set "
					"array not allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_cq_create_set(phba,
					phba->sli4_hba.nvmet_cqset,
					phba->sli4_hba.hba_eq,
					LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3164 Failed setup of NVME CQ "
						"Set, rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			/* Set up NVMET Receive Complete Queue */
			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
					    phba->sli4_hba.hba_eq[0],
					    LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6089 Failed setup NVMET CQ: "
						"rc = 0x%x\n", (uint32_t)rc);
				goto out_destroy;
			}
			phba->sli4_hba.nvmet_cqset[0]->chann = 0;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"6090 NVMET CQ setup: cq-id=%d, "
					"parent eq-id=%d\n",
					phba->sli4_hba.nvmet_cqset[0]->queue_id,
					phba->sli4_hba.hba_eq[0]->queue_id);
		}
	}
	/* Set up slow-path ELS WQ/CQ */
	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS %s not allocated\n",
				phba->sli4_hba.els_cq ? "WQ" : "CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}
	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
			       phba->sli4_hba.els_cq,
			       phba->sli4_hba.els_wq,
			       NULL, 0, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
			(uint32_t)rc);
		goto out_destroy;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_nvme_io_channel) {
		/* Set up NVME LS Complete Queue */
		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6091 LS %s not allocated\n",
					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}
		rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
				       phba->sli4_hba.nvmels_cq,
				       phba->sli4_hba.nvmels_wq,
				       NULL, 0, LPFC_NVME_LS);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0529 Failed setup of NVME LS WQ/CQ: "
					"rc = 0x%x\n", (uint32_t)rc);
			goto out_destroy;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 NVME LS WQ setup: wq-id=%d, "
				"parent cq-id=%d\n",
				phba->sli4_hba.nvmels_wq->queue_id,
				phba->sli4_hba.nvmels_cq->queue_id);
	}
	/*
	 * Create NVMET Receive Queue (RQ)
	 */
	if (phba->nvmet_support) {
		if ((!phba->sli4_hba.nvmet_cqset) ||
		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
		    (!phba->sli4_hba.nvmet_mrq_data)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6130 MRQ CQ Queues not "
					"allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_mrq_create(phba,
					     phba->sli4_hba.nvmet_mrq_hdr,
					     phba->sli4_hba.nvmet_mrq_data,
					     phba->sli4_hba.nvmet_cqset,
					     LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6098 Failed setup of NVMET "
						"MRQ: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			rc = lpfc_rq_create(phba,
					    phba->sli4_hba.nvmet_mrq_hdr[0],
					    phba->sli4_hba.nvmet_mrq_data[0],
					    phba->sli4_hba.nvmet_cqset[0],
					    LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6057 Failed setup of NVMET "
						"Receive Queue: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

			lpfc_printf_log(
				phba, KERN_INFO, LOG_INIT,
				"6099 NVMET RQ setup: hdr-rq-id=%d, "
				"dat-rq-id=%d parent cq-id=%d\n",
				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
				phba->sli4_hba.nvmet_cqset[0]->queue_id);
		}
	}

	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_fof) {
		rc = lpfc_fof_queue_setup(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0549 Failed setup of FOF Queues: "
					"rc = 0x%x\n", rc);
			goto out_destroy;
		}
	}
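
	/*
	 * Program the EQ delay multiplier in batches; a single
	 * MODIFY_EQ_DELAY mailbox command can presumably carry only
	 * LPFC_MAX_EQ_DELAY_EQID_CNT EQ ids, so step through the EQs
	 * that many at a time.
	 */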
	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
					 phba->cfg_fcp_imax);

	return 0;

out_destroy:
	lpfc_sli4_queue_unset(phba);
out_error:
	return rc;
}
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues created for the
 * FCoE HBA operation.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int qidx;

	/* Unset the queues created for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset FCP work queue */
	if (phba->sli4_hba.fcp_wq)
		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);

	/* Unset NVME work queue */
	if (phba->sli4_hba.nvme_wq) {
		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
	}

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	/* Unset NVME response complete queue */
	if (phba->sli4_hba.nvme_cq)
		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);

	if (phba->nvmet_support) {
		/* Unset NVMET MRQ queue */
		if (phba->sli4_hba.nvmet_mrq_hdr) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_rq_destroy(
					phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
		}

		/* Unset NVMET CQ Set complete queue */
		if (phba->sli4_hba.nvmet_cqset) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_cq_destroy(
					phba, phba->sli4_hba.nvmet_cqset[qidx]);
		}
	}

	/* Unset FCP response complete queue */
	if (phba->sli4_hba.fcp_cq)
		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);

	/* Unset fast-path event queue */
	if (phba->sli4_hba.hba_eq)
		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
}
/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;
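
	/*
	 * Pre-allocate four times the default CQ entry count; presumably
	 * enough headroom for the ISR to keep queueing slow-path events
	 * without ever failing an allocation at interrupt time.
	 */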
	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}
/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}
/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}
/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the locked version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}
/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}
/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the locked version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the pending completion-queue events back into the
 * free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys all
 * resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk;
	uint32_t port_reset = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
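
	/*
	 * if_type 0 resets the function with a SLI_FUNCTION_RESET mailbox
	 * command; if_type 2 and 6 instead poll the SLIPORT status register
	 * for RDY, write INIT_PORT to the control register, then poll for
	 * RDY again.
	 */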
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
						      GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds. If the port doesn't respond, treat
		 * it as an error.
		 */
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				       STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (!port_reset) {
			/*
			 * Reset the port now
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			rc = -ENODEV;
			goto out;
		}
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3317 HBA not functional: IP Reset Failed "
				"try: echo fw_reset > board_mode\n");
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
		    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * addr
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			goto out;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}
	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
		if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
			/*
			 * Map SLI4 if type 0 HBA Control Register base to a
			 * kernel virtual address and setup the registers.
			 */
			phba->pci_bar1_map = pci_resource_start(pdev,
								PCI_64BIT_BAR2);
			bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
			phba->sli4_hba.ctrl_regs_memmap_p =
				ioremap(phba->pci_bar1_map,
					bar1map_len);
			if (!phba->sli4_hba.ctrl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA "
					"control registers.\n");
				error = -ENOMEM;
				goto out_iounmap_conf;
			}
			phba->pci_bar2_memmap_p =
				phba->sli4_hba.ctrl_regs_memmap_p;
			lpfc_sli4_bar1_register_memmap(phba, if_type);
		} else {
			error = -ENOMEM;
			goto out_iounmap_conf;
		}
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
		/*
		 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_err(&pdev->dev,
				"ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_conf;
		}
		phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		lpfc_sli4_bar1_register_memmap(phba, if_type);
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
		if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
			/*
			 * Map SLI4 if type 0 HBA Doorbell Register base to
			 * a kernel virtual address and setup the registers.
			 */
			phba->pci_bar2_map = pci_resource_start(pdev,
								PCI_64BIT_BAR4);
			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
			phba->sli4_hba.drbl_regs_memmap_p =
				ioremap(phba->pci_bar2_map,
					bar2map_len);
			if (!phba->sli4_hba.drbl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA"
					" doorbell registers.\n");
				error = -ENOMEM;
				goto out_iounmap_ctrl;
			}
			phba->pci_bar4_memmap_p =
				phba->sli4_hba.drbl_regs_memmap_p;
			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
			if (error)
				goto out_iounmap_all;
		} else {
			error = -ENOMEM;
			goto out_iounmap_all;
		}
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
	    pci_resource_start(pdev, PCI_64BIT_BAR4)) {
		/*
		 * Map SLI4 if type 6 HBA DPP Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.dpp_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.dpp_regs_memmap_p) {
			dev_err(&pdev->dev,
				"ioremap failed for SLI4 HBA dpp registers.\n");
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
	}
	/* Set up the EQ/CQ register handling functions now */
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
		phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
		phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
		phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
		phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
		break;
	default:
		break;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	rc = pci_alloc_irq_vectors(phba->pcidev,
			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
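
	/*
	 * SLI-3 uses exactly two MSI-X vectors: vector 0 is bound to the
	 * slow-path handler and vector 1 to the fast-path handler below;
	 * min and max vector counts above are both LPFC_MSIX_VECTORS, so
	 * the allocation is all-or-nothing.
	 */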
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}
/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode requested by the module parameter.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the driver
 * will try to fall back from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device, in the order
 * of:
 *      MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}
/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	int nr_irqs, i;

	if (phba->intr_type == MSIX)
		nr_irqs = LPFC_MSIX_VECTORS;
	else
		nr_irqs = 1;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(phba->pcidev, i), phba);
	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of msix vectors allocated.
 *
 * The routine will figure out the CPU affinity assignment for every
 * MSI-X vector allocated for the HBA.  The hba_eq_hdl will be updated
 * with a pointer to the CPU mask that defines ALL the CPUs this vector
 * can be associated with.  If the vector can be uniquely associated with
 * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
 * In addition, the CPU to IO channel mapping will be calculated
 * and the phba->sli4_hba.cpu_map array will reflect this.
 */
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
	struct lpfc_vector_map_info *cpup;
	int index = 0;
	int vec = 0;
	int cpu;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif

	/* Init cpu_map array */
	memset(phba->sli4_hba.cpu_map, 0xff,
	       (sizeof(struct lpfc_vector_map_info) *
		phba->sli4_hba.num_present_cpu));

	/* Update CPU map with physical id and core id of each CPU */
	cpup = phba->sli4_hba.cpu_map;
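
	/*
	 * Each present CPU is assigned an IRQ vector and an IO channel in
	 * round-robin order: 'vec' wraps at the number of allocated MSI-X
	 * vectors and 'channel_id' wraps at cfg_fcp_io_channel.
	 */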
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = 0;
#endif
		cpup->channel_id = index;	/* For now round robin */
		cpup->irq = pci_irq_vector(phba->pcidev, vec);
		vec++;
		if (vec >= vectors)
			vec = 0;
		index++;
		if (index >= phba->cfg_fcp_io_channel)
			index = 0;
		cpup++;
	}
}

/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;
	char *name;

	/* Set up MSI-X multi-message vectors */
	vectors = phba->io_channel_irqs;
	if (phba->cfg_fof)
		vectors++;
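
	/*
	 * pci_alloc_irq_vectors() allocates anywhere between the given
	 * minimum (1 with NVMET support, else 2) and 'vectors', returning
	 * the count actually allocated or a negative errno.
	 * PCI_IRQ_AFFINITY asks the PCI core to spread the vectors
	 * across CPUs.
	 */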
	rc = pci_alloc_irq_vectors(phba->pcidev,
				(phba->nvmet_support) ? 1 : 2,
				vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	vectors = rc;

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME"%d", index);

		phba->sli4_hba.hba_eq_hdl[index].idx = index;
		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
		atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
		if (phba->cfg_fof && (index == (vectors - 1)))
			rc = request_irq(pci_irq_vector(phba->pcidev, index),
				 &lpfc_sli4_fof_intr_handler, 0,
				 name,
				 &phba->sli4_hba.hba_eq_hdl[index]);
		else
			rc = request_irq(pci_irq_vector(phba->pcidev, index),
				 &lpfc_sli4_hba_intr_handler, 0,
				 name,
				 &phba->sli4_hba.hba_eq_hdl[index]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}

	if (phba->cfg_fof)
		vectors--;

	if (vectors != phba->io_channel_irqs) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->io_channel_irqs, vectors);
		if (phba->cfg_fcp_io_channel > vectors)
			phba->cfg_fcp_io_channel = vectors;
		if (phba->cfg_nvme_io_channel > vectors)
			phba->cfg_nvme_io_channel = vectors;
		if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
			phba->io_channel_irqs = phba->cfg_fcp_io_channel;
		else
			phba->io_channel_irqs = phba->cfg_nvme_io_channel;
	}
	lpfc_cpu_affinity_check(phba, vectors);

	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--)
		free_irq(pci_irq_vector(phba->pcidev, index),
			 &phba->sli4_hba.hba_eq_hdl[index]);

	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}
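
	/*
	 * A single MSI vector services every event queue, so each EQ
	 * handle is still stamped with its index and a back-pointer to
	 * the HBA (including the FoF entry when cfg_fof is set).
	 */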
	for (index = 0; index < phba->io_channel_irqs; index++) {
		phba->sli4_hba.hba_eq_hdl[index].idx = index;
		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
	}

	if (phba->cfg_fof) {
		phba->sli4_hba.hba_eq_hdl[index].idx = index;
		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
	}
	return 0;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the driver,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device, in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, idx;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
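		/*
		 * Note: unlike the SLI-3 path there is no config-port
		 * mailbox command to issue first, so retval is simply
		 * primed to 0 to keep the control flow parallel.
		 */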
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			struct lpfc_hba_eq_hdl *eqhdl;

			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;

			for (idx = 0; idx < phba->io_channel_irqs; idx++) {
				eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
				eqhdl->idx = idx;
				eqhdl->phba = phba;
				atomic_set(&eqhdl->hba_eq_in_use, 1);
			}
			if (phba->cfg_fof) {
				eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
				eqhdl->idx = idx;
				eqhdl->phba = phba;
				atomic_set(&eqhdl->hba_eq_in_use, 1);
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		int index;

		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->io_channel_irqs; index++)
			free_irq(pci_irq_vector(phba->pcidev, index),
					&phba->sli4_hba.hba_eq_hdl[index]);

		if (phba->cfg_fof)
			free_irq(pci_irq_vector(phba->pcidev, index),
					&phba->sli4_hba.hba_eq_hdl[index]);
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's XRI exchange busy. It will check the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, log an error message, and wait forever. Only when
 * all XRI exchange busy events complete will the driver unload proceed with
 * invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	int nvme_xri_cmpl = 1;
	int nvmet_xri_cmpl = 1;
	int fcp_xri_cmpl = 1;
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Driver just aborted IOs during the hba_unset process.  Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */
	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);

	/* Wait for NVME pending IO to flush back to transport. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_nvme_wait_for_io_drain(phba);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		nvme_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
		nvmet_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	}
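
	/*
	 * Two-phase poll: recheck the abort lists every
	 * LPFC_XRI_EXCH_BUSY_WAIT_T1 ms until LPFC_XRI_EXCH_BUSY_WAIT_TMO
	 * is reached, then drop to the slower LPFC_XRI_EXCH_BUSY_WAIT_T2
	 * interval, logging whichever lists are still busy on each pass.
	 */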
	while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
	       !nvmet_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!nvmet_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6424 NVMET XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!nvme_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6100 NVME XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			nvme_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvme_buf_list);
			nvmet_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		}
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
			fcp_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_scsi_buf_list);

		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
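
	/*
	 * Poll in 10ms steps, giving the active command up to
	 * LPFC_ACTIVE_MBOX_WAIT_CNT iterations to finish before it is
	 * forcefully completed with MBX_NOT_FINISHED below.
	 */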
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}

	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings.  The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
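
	/*
	 * With interrupts still disabled the mailbox has to be polled;
	 * otherwise it is issued and block-waited with the command's
	 * own timeout value.
	 */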
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
					   mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
	phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
			      bf_get(cfg_xib, mbx_sli4_parameters));

	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
	    !phba->nvme_support) {
		phba->nvme_support = 0;
		phba->nvmet_support = 0;
		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
		phba->cfg_nvme_io_channel = 0;
		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
				"6101 Disabling NVME support: "
				"Not supported by firmware: %d %d\n",
				bf_get(cfg_nvme, mbx_sli4_parameters),
				bf_get(cfg_xib, mbx_sli4_parameters));

		/* If firmware doesn't support NVME, just use SCSI support */
		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
			return -ENODEV;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
	}

	/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
		phba->cfg_enable_pbde = 0;

	/*
	 * To support Suppress Response feature we must satisfy 3 conditions.
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In SLI4-Parameters Descriptor:
	 * Extended Inline Buffers (XIB) must be supported.
	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
	 * (double negative).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands.  In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, phba->nvme_support,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;

	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	return 0;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device and driver to see if the driver
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
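
	/*
	 * Retry loop: if the active-interrupt test below fails for the
	 * current mode, that mode is disabled and the next lower one
	 * (MSI-X -> MSI -> INTx) is tried until one passes.
	 */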
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver's suspend/resume support:
 * all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the
 * suspend() method call are treated as SUSPEND, and the driver fully
 * reinitializes its device during the resume() method call. Therefore the
 * driver sets the device to the PCI_D3hot state in PCI config space instead
 * of setting it according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that the driver
 * implements only the minimum PM requirements for a power-aware driver's
 * suspend/resume support: all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) passed to the suspend() method call are treated as SUSPEND, and
 * the driver fully reinitializes its device during the resume() method call.
 * Therefore the device is set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for permanently disabling
 * the PCI slot. It blocks the SCSI transport layer traffic and flushes the
 * FCP pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}

/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT + NVMET IOCBs to reserve
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}

static void
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
	     magic_number != MAGIC_NUMER_G6) ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
	     magic_number != MAGIC_NUMER_G7))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3030 This firmware version is not supported on "
			"this HBA model. Device:%x Magic:%x Type:%x "
			"ID:%x Size %d %zd\n",
			phba->pcidev->device, magic_number, ftype, fid,
			fsize, fw->size);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3022 FW Download failed. Device:%x Magic:%x Type:%x "
			"ID:%x Size %d %zd\n",
			phba->pcidev->device, magic_number, ftype, fid,
			fsize, fw->size);
}

/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to lpfc hba data structure.
 *
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
				    (fw->size - offset), &offset);
			if (rc) {
				lpfc_log_write_firmware_error(phba, offset,
					magic_number, ftype, fid, fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
	return;
}

/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: upgrade mode, INT_FW_UPGRADE or RUN_FW_UPGRADE.
 *
 * This routine is called to perform Linux generic firmware upgrade on a
 * device that supports such a feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
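
	/*
	 * INT_FW_UPGRADE takes the asynchronous request_firmware_nowait()
	 * path with lpfc_write_firmware() as the completion callback;
	 * RUN_FW_UPGRADE fetches the image synchronously and writes it
	 * immediately.
	 */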
	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					file_name, &phba->pcidev->dev,
					GFP_KERNEL, (void *)phba,
					lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device and driver to see if the driver
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	lpfc_stop_port(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}
  10457. /* Default to single EQ for non-MSI-X */
  10458. if (phba->intr_type != MSIX) {
  10459. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
  10460. phba->cfg_fcp_io_channel = 1;
  10461. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
  10462. phba->cfg_nvme_io_channel = 1;
  10463. if (phba->nvmet_support)
  10464. phba->cfg_nvmet_mrq = 1;
  10465. }
  10466. phba->io_channel_irqs = 1;
  10467. }
  10468. /* Set up SLI-4 HBA */
  10469. if (lpfc_sli4_hba_setup(phba)) {
  10470. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  10471. "1421 Failed to set up hba\n");
  10472. error = -ENODEV;
  10473. goto out_disable_intr;
  10474. }
  10475. /* Log the current active interrupt mode */
  10476. phba->intr_mode = intr_mode;
  10477. lpfc_log_intr_mode(phba, intr_mode);
  10478. /* Perform post initialization setup */
  10479. lpfc_post_init_setup(phba);
	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type, making a check for nvme_support unnecessary.
	 */
	if ((phba->nvmet_support == 0) &&
	    (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
		/* Create NVME binding with nvme_fc_transport. This
		 * ensures the vport is initialized.  If the localport
		 * create fails, it should not unload the driver to
		 * support field issues.
		 */
		error = lpfc_nvme_create_localport(vport);
		if (error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6004 NVME registration failed, "
					"error x%x\n",
					error);
		}
	}
	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);
	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
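
/*
 * Editorial note on the error unwinding above: each failure path jumps to
 * the label that undoes only the setup steps that have already succeeded,
 * in reverse order.  A minimal sketch of the same idiom, with hypothetical
 * helpers:
 *
 *	error = setup_a();
 *	if (error)
 *		goto out;
 *	error = setup_b();
 *	if (error)
 *		goto unwind_a;
 *	return 0;
 * unwind_a:
 *	teardown_a();
 * out:
 *	return error;
 */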
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to detach a device
 * with the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface
 * spec is removed from the PCI bus, it performs all the necessary cleanup
 * for the HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
	 * localports are destroyed afterwards to clean up all transport
	 * memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);
	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_nvme_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with the SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method are treated as
 * SUSPEND, and the driver fully reinitializes its device during the
 * resume() method call. Therefore, the driver sets the device to the
 * PCI_D3hot state in PCI config space instead of setting it according to
 * the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with the SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the
 * driver implements only the minimum PM requirements of a power-aware
 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
 * suspend() method are treated as SUSPEND, and the driver fully
 * reinitializes its device during the resume() method call. Therefore, the
 * device is set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* Flush the outstanding NVME IOs if fc4 type enabled. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_sli_flush_nvme_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);

	/* Flush the outstanding NVME IOs if fc4 type enabled. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_sli_flush_nvme_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with the SLI-4 interface spec. It is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
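
/*
 * Recovery flow (informational sketch): for a frozen channel, the PCI error
 * recovery core walks the handlers registered in lpfc_err_handler at the
 * bottom of this file, roughly:
 *
 *	lpfc_io_error_detected()  ->  PCI_ERS_RESULT_NEED_RESET
 *	lpfc_io_slot_reset()      ->  PCI_ERS_RESULT_RECOVERED
 *	lpfc_io_resume()          ->  I/O traffic restarts
 *
 * The _s4 routines here implement each of those steps for SLI-4 adapters.
 */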
/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with the SLI-4 interface spec. It is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with the SLI-4 interface spec. It is called when kernel error
 * recovery tells the lpfc driver that it is ok to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, the function reset is performed through a
	 * mailbox command, which needs DMA to be enabled, so this operation
	 * has to be moved to the io resume phase. Taking the device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and the driver
 * to see if the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported by this adapter. If
 * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
 * Otherwise, the enable OAS flag is cleared and the pool created for OAS
 * device data is destroyed.
 *
 **/
void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}

	return;
}
/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_fof) {
		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		/* Bind this CQ/WQ to the OAS ring */
		pring = phba->sli4_hba.oas_wq->pring;
		pring->sli.sli4.wqp =
			(void *)phba->sli4_hba.oas_wq;
		phba->sli4_hba.oas_cq->pring = pring;
	}

	return 0;

out_oas_wq:
	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return rc;
}
/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as placeholder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	uint32_t wqesize;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	qdesc->qe_valid = 1;
	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_fof) {

		/* Create OAS CQ */
		if (phba->enab_exp_wqcq_pages)
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_EXPANDED_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      LPFC_CQE_EXP_COUNT);
		else
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		qdesc->qe_valid = 1;
		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		if (phba->enab_exp_wqcq_pages) {
			wqesize = (phba->fcp_embed_io) ?
				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_EXPANDED_PAGE_SIZE,
						      wqesize,
						      LPFC_WQE_EXP_COUNT);
		} else
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.wq_esize,
						      phba->sli4_hba.wq_ecount);

		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}
/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues associated with
 * the FC HBA operation.
 *
 * Return codes
 *	0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}
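
/*
 * Typical FOF queue lifecycle (editorial sketch): allocation and hardware
 * setup are split across the routines above, so a caller pairs them as:
 *
 *	rc = lpfc_fof_queue_create(phba);	(allocate EQ/CQ/WQ memory)
 *	if (!rc)
 *		rc = lpfc_fof_queue_setup(phba); (create queues on the HBA)
 *	...
 *	lpfc_fof_queue_destroy(phba);		(free queue memory on teardown)
 */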
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
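
/*
 * The misc device above registers as /dev/lpfcmgmt with a dynamic minor
 * number.  A userspace management tool would reach it with an ordinary
 * open (illustrative sketch):
 *
 *	int fd = open("/dev/lpfcmgmt", O_RDWR);
 *
 * Since no file operations beyond .owner are provided, the node appears to
 * exist mainly so a management application can hold a reference that pins
 * the module while it is in use.
 */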
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}
	lpfc_nvme_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = num_present_cpus();

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);