qla_init.c 228 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
7877887798780878187828783878487858786878787888789879087918792879387948795
  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2014 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include "qla_gbl.h"
  9. #include <linux/delay.h>
  10. #include <linux/slab.h>
  11. #include <linux/vmalloc.h>
  12. #include "qla_devtbl.h"
  13. #ifdef CONFIG_SPARC
  14. #include <asm/prom.h>
  15. #endif
  16. #include <target/target_core_base.h>
  17. #include "qla_target.h"
/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
/* Chip bring-up and topology configuration. */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

/* 84xx/25xx specific initialization helpers. */
static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);

/* Session/login event plumbing used by the async discovery machinery. */
static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */
/*
 * qla2x00_sp_timeout() - Per-SRB timer expiry handler.
 * @t: timer embedded in the SRB's iocb command area.
 *
 * Detaches the timed-out SRB from the request queue's outstanding-command
 * array under hardware_lock (so the ISR can no longer complete it), then
 * dispatches to the command-type-specific timeout routine outside the lock.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	struct req_que *req;
	unsigned long flags;
	struct qla_hw_data *ha = sp->vha->hw;

	/* Timer callbacks are expected to run with IRQs enabled. */
	WARN_ON_ONCE(irqs_disabled());
	/*
	 * NOTE(review): req comes from sp->qpair but is protected here by
	 * ha->hardware_lock rather than the qpair lock — confirm intended.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = sp->qpair->req;
	/* Drop ownership: the ISR must not complete this handle anymore. */
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* Command-type-specific timeout handling (abort/flush/done). */
	iocb->timeout(sp);
}
  56. void
  57. qla2x00_sp_free(void *ptr)
  58. {
  59. srb_t *sp = ptr;
  60. struct srb_iocb *iocb = &sp->u.iocb_cmd;
  61. del_timer(&iocb->timer);
  62. qla2x00_rel_sp(sp);
  63. }
  64. /* Asynchronous Login/Logout Routines -------------------------------------- */
  65. unsigned long
  66. qla2x00_get_async_timeout(struct scsi_qla_host *vha)
  67. {
  68. unsigned long tmo;
  69. struct qla_hw_data *ha = vha->hw;
  70. /* Firmware should use switch negotiated r_a_tov for timeout. */
  71. tmo = ha->r_a_tov / 10 * 2;
  72. if (IS_QLAFX00(ha)) {
  73. tmo = FX00_DEF_RATOV * 2;
  74. } else if (!IS_FWI2_CAPABLE(ha)) {
  75. /*
  76. * Except for earlier ISPs where the timeout is seeded from the
  77. * initialization control block.
  78. */
  79. tmo = ha->login_timeout;
  80. }
  81. return tmo;
  82. }
  83. void
  84. qla2x00_async_iocb_timeout(void *data)
  85. {
  86. srb_t *sp = data;
  87. fc_port_t *fcport = sp->fcport;
  88. struct srb_iocb *lio = &sp->u.iocb_cmd;
  89. int rc, h;
  90. unsigned long flags;
  91. if (fcport) {
  92. ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
  93. "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
  94. sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
  95. fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
  96. } else {
  97. pr_info("Async-%s timeout - hdl=%x.\n",
  98. sp->name, sp->handle);
  99. }
  100. switch (sp->type) {
  101. case SRB_LOGIN_CMD:
  102. rc = qla24xx_async_abort_cmd(sp, false);
  103. if (rc) {
  104. /* Retry as needed. */
  105. lio->u.logio.data[0] = MBS_COMMAND_ERROR;
  106. lio->u.logio.data[1] =
  107. lio->u.logio.flags & SRB_LOGIN_RETRIED ?
  108. QLA_LOGIO_LOGIN_RETRIED : 0;
  109. spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
  110. for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
  111. h++) {
  112. if (sp->qpair->req->outstanding_cmds[h] ==
  113. sp) {
  114. sp->qpair->req->outstanding_cmds[h] =
  115. NULL;
  116. break;
  117. }
  118. }
  119. spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
  120. sp->done(sp, QLA_FUNCTION_TIMEOUT);
  121. }
  122. break;
  123. case SRB_LOGOUT_CMD:
  124. case SRB_CT_PTHRU_CMD:
  125. case SRB_MB_IOCB:
  126. case SRB_NACK_PLOGI:
  127. case SRB_NACK_PRLI:
  128. case SRB_NACK_LOGO:
  129. case SRB_CTRL_VP:
  130. rc = qla24xx_async_abort_cmd(sp, false);
  131. if (rc) {
  132. spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
  133. for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
  134. h++) {
  135. if (sp->qpair->req->outstanding_cmds[h] ==
  136. sp) {
  137. sp->qpair->req->outstanding_cmds[h] =
  138. NULL;
  139. break;
  140. }
  141. }
  142. spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
  143. sp->done(sp, QLA_FUNCTION_TIMEOUT);
  144. }
  145. break;
  146. }
  147. }
/*
 * qla2x00_async_login_sp_done() - Completion callback for an async login SRB.
 * @ptr: the completed SRB.
 * @res: driver completion status.
 *
 * Forwards the login mailbox/IOP status words to the fcport event handler
 * as an FCME_PLOGI_DONE event (skipped while unloading) and frees the SRB.
 */
static void
qla2x00_async_login_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		/* data[]: mailbox status; iop[]: IOCB status parameters. */
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}
  171. static inline bool
  172. fcport_is_smaller(fc_port_t *fcport)
  173. {
  174. if (wwn_to_u64(fcport->port_name) <
  175. wwn_to_u64(fcport->vha->port_name))
  176. return true;
  177. else
  178. return false;
  179. }
/*
 * fcport_is_bigger() - Complement of fcport_is_smaller(): true when the
 * remote WWPN is greater than or equal to the local adapter's WWPN.
 */
static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}
/*
 * qla2x00_async_login() - Issue an asynchronous login (PLOGI/PRLI) IOCB.
 * @vha:    host the port belongs to.
 * @fcport: remote port to log into; must already have a loop id.
 * @data:   login status words (unused here; part of the async API shape).
 *
 * Returns QLA_SUCCESS when the IOCB was queued.  Returns
 * QLA_FUNCTION_FAILED when the adapter is offline, a login is already in
 * flight, no loop id is assigned, or the SRB cannot be allocated/started;
 * on start failure a relogin is scheduled via the DPC flags.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	/* Refuse when offline, already sending, or without a loop id. */
	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;
	fcport->disc_state = DSC_LOGIN_PEND;
	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generations so completion can detect stale results. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_login_sp_done;
	/*
	 * In N2N topology the side with the bigger WWPN lets the peer
	 * originate PLOGI and only sends PRLI itself.
	 */
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	else
		lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	/* NVMe ports get their PRLI separately; skip the FCP PRLI here. */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Could not queue: let DPC retry the login later. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}
  238. static void
  239. qla2x00_async_logout_sp_done(void *ptr, int res)
  240. {
  241. srb_t *sp = ptr;
  242. sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
  243. sp->fcport->login_gen++;
  244. qlt_logo_completion_handler(sp->fcport, res);
  245. sp->free(sp);
  246. }
/*
 * qla2x00_async_logout() - Issue an asynchronous LOGO IOCB for @fcport.
 * @vha:    host the port belongs to.
 * @fcport: remote port to log out of.
 *
 * Returns QLA_SUCCESS when queued, QLA_FUNCTION_FAILED otherwise; the
 * async flags are cleared on every failure path.
 */
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_logout_sp_done;

	/* Log before start: after start the completion may free @sp. */
	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	return rval;
}
/*
 * qla2x00_async_prlo_done() - Work-queue half of PRLO completion.
 * @vha:    host the port belongs to.
 * @fcport: remote port the PRLO completed for.
 * @data:   logio status words; data[0] is handed to target mode.
 *
 * Runs from process context after qla2x00_async_prlo_sp_done() posted it.
 */
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1, 0);
	qlt_logo_completion_handler(fcport, data[0]);
}
/*
 * qla2x00_async_prlo_sp_done() - IRQ-side completion for a PRLO SRB.
 * @s:   the completed SRB.
 * @res: driver completion status (unused; status travels in logio.data).
 *
 * Defers real handling to a work item (qla2x00_async_prlo_done()) unless
 * the driver is unloading, then frees the SRB.
 */
static void
qla2x00_async_prlo_sp_done(void *s, int res)
{
	srb_t *sp = (srb_t *)s;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}
  300. int
  301. qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
  302. {
  303. srb_t *sp;
  304. struct srb_iocb *lio;
  305. int rval;
  306. rval = QLA_FUNCTION_FAILED;
  307. sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
  308. if (!sp)
  309. goto done;
  310. sp->type = SRB_PRLO_CMD;
  311. sp->name = "prlo";
  312. lio = &sp->u.iocb_cmd;
  313. lio->timeout = qla2x00_async_iocb_timeout;
  314. qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
  315. sp->done = qla2x00_async_prlo_sp_done;
  316. rval = qla2x00_start_sp(sp);
  317. if (rval != QLA_SUCCESS)
  318. goto done_free_sp;
  319. ql_dbg(ql_dbg_disc, vha, 0x2070,
  320. "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
  321. sp->handle, fcport->loop_id, fcport->d_id.b.domain,
  322. fcport->d_id.b.area, fcport->d_id.b.al_pa);
  323. return rval;
  324. done_free_sp:
  325. sp->free(sp);
  326. done:
  327. fcport->flags &= ~FCF_ASYNC_ACTIVE;
  328. return rval;
  329. }
/*
 * qla24xx_handle_adisc_event() - Handle FCME_ADISC_DONE for @ea->fcport.
 *
 * On ADISC failure the session is scheduled for deletion.  On success,
 * stale results (login/RSCN generation changed while the ADISC was in
 * flight) are dropped or trigger a GIDPN re-query; otherwise the port
 * database update path is entered.
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	/* Port is already being torn down; nothing to do. */
	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		/* An RSCN arrived mid-flight: the port id may have moved. */
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla24xx_post_gidpn_work(vha, ea->fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}
  362. int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
  363. {
  364. struct qla_work_evt *e;
  365. e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
  366. if (!e)
  367. return QLA_FUNCTION_FAILED;
  368. e->u.fcport.fcport = fcport;
  369. fcport->flags |= FCF_ASYNC_ACTIVE;
  370. fcport->disc_state = DSC_LOGIN_PEND;
  371. return qla2x00_post_work(vha, e);
  372. }
/*
 * qla2x00_async_adisc_sp_done() - Completion callback for an async ADISC SRB.
 * @ptr: the completed SRB.
 * @res: driver completion status, forwarded in ea.rc.
 *
 * Packages the logio status into an FCME_ADISC_DONE event for the fcport
 * event handler, then frees the SRB.
 */
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_ADISC_DONE;
	ea.rc = res;
	/* data[]: mailbox status; iop[]: IOCB status parameters. */
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	sp->free(sp);
}
/*
 * qla2x00_async_adisc() - Issue an asynchronous ADISC IOCB for @fcport.
 * @vha:    host the port belongs to.
 * @fcport: remote port to verify with ADISC.
 * @data:   data[1] may carry QLA_LOGIO_LOGIN_RETRIED to request retried
 *          login semantics in the IOCB flags.
 *
 * On any failure path the async flags are cleared and the ADISC is
 * re-posted as a work item so discovery can retry from process context.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	/* Snapshot generations so completion can detect stale results. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* Retry from the work queue rather than dropping the ADISC. */
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}
/*
 * qla24xx_handle_gnl_done_event() - Process a Get Name List result for
 * @ea->fcport.
 *
 * Walks the firmware-provided extended name list looking for the fcport's
 * WWPN.  When found, reconciles loop id / port id conflicts with other
 * sessions and drives the port's login state machine according to the
 * firmware login state and the current topology.  When the port is absent
 * from the list, the handling depends on topology: fabric modes clean up
 * conflicting sessions and retry login; N2N escalates through link reset,
 * chip reset, and finally gives up on the remote port.
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);

	/* Port is being torn down; nothing to reconcile. */
	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	/* An RSCN arrived while GNL was in flight: re-query the port id. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20df,
		    "%s %8phC rscn gen changed rscn %d|%d \n",
		    __func__, fcport->port_name,
		    fcport->last_rscn_gen, fcport->rscn_gen);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		/* Login state advanced elsewhere; this result is stale. */
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		return;
	}

	/* ea->data[0] holds the number of bytes firmware transferred. */
	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		/* List entries store the port id bytes al_pa-first. */
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);
		/* NVMe state lives in the high nibble, FCP in the low one. */
		if (fcport->fc4f_nvme)
			current_login_state = e->current_login_state >> 4;
		else
			current_login_state = e->current_login_state & 0xf;

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			/*
			 * Port id or loop id moved: tear the session down
			 * so it can be rebuilt with the new identity.
			 */
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->d_id.b24) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
			    fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
		    id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another share fcport share the same loop_id &
			 * nport id. Conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (vha->hw->current_topology) {
		default:
			/* Fabric/loop topologies. */
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				/* BIT_4 of service params = target role. */
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
					    DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			/* N2N: adopt firmware's view of the port. */
			fcport->fw_login_state = current_login_state;
			fcport->d_id = id;
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (fcport_is_bigger(fcport)) {
					/* local adapter is smaller */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				/* drop through */
			default:
				if (fcport_is_smaller(fcport)) {
					/* local adapter is bigger */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		} /* switch (ha->current_topology) */
	}

	if (!found) {
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				/*
				 * NOTE(review): byte order here is reversed
				 * relative to the lookup loop above
				 * (domain=[2] there vs. [0] here) — confirm
				 * which ordering is correct.
				 */
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					if (conflict_fcport) {
						ql_dbg(ql_dbg_disc + ql_dbg_verbose,
						    vha, 0x20e5,
						    "%s %d %8phC post del sess\n",
						    __func__, __LINE__,
						    conflict_fcport->port_name);
						qlt_schedule_sess_for_deletion
							(conflict_fcport);
					}
				}
				/*
				 * FW already picked this loop id for
				 * another fcport
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			fcport->disc_state = DSC_DELETED;
			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
				if (fcport->n2n_link_reset_cnt < 2) {
					fcport->n2n_link_reset_cnt++;
					/*
					 * remote port is not sending PLOGI.
					 * Reset link to kick start his state
					 * machine
					 */
					set_bit(N2N_LINK_RESET,
					    &vha->dpc_flags);
				} else {
					if (fcport->n2n_chip_reset < 1) {
						ql_log(ql_log_info, vha, 0x705d,
						    "Chip reset to bring laser down");
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						fcport->n2n_chip_reset++;
					} else {
						ql_log(ql_log_info, vha, 0x705d,
						    "Remote port %8ph is not coming back\n",
						    fcport->port_name);
						fcport->scan_state = 0;
					}
				}
				qla2xxx_wake_dpc(vha);
			} else {
				/*
				 * report port suppose to do PLOGI. Give him
				 * more time. FW will catch it.
				 */
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
			break;
		default:
			break;
		}
	}
} /* gnl_event */
/*
 * qla24xx_async_gnl_sp_done() - Completion for the Get Name List mailbox SRB.
 * @s:   the completed SRB.
 * @res: driver completion status.
 *
 * Marks every loop id reported by firmware in the host's loop_id_map,
 * replays an FCME_GNL_DONE event for each fcport queued on
 * vha->gnl.fcports, and posts "new session" work for list entries with
 * no matching fcport.
 */
static void
qla24xx_async_gnl_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	/*
	 * NOTE(review): this early return leaves @sp unfreed and
	 * vha->gnl.sent set — confirm the timeout path reclaims both.
	 */
	if (res == QLA_FUNCTION_TIMEOUT)
		return;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;
	ea.event = FCME_GNL_DONE;

	/* in_mb[1] = number of bytes firmware wrote into the GNL buffer. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	/* Reserve every handle firmware reported so it is never reused. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	/* Detach the waiter list under the lock; process it unlocked. */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
		}
	}

	/* Allow the next GNL to be issued. */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
/*
 * qla24xx_async_gnl() - Issue a Get Name List (MBC_PORT_NODE_NAME_LIST)
 * mailbox IOCB.
 * @vha:    host to query.
 * @fcport: port whose discovery triggered the request.
 *
 * Multiple fcports may share one in-flight GNL: each caller queues on
 * vha->gnl.fcports, and only the first actually sends the command
 * (vha->gnl.sent gates this under the session lock).  Firmware DMAs the
 * extended name list into the buffer at vha->gnl.ldma.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GNL;
	/* Record generations so completion can spot stale results. */
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; ride on its completion. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* MBC_PORT_NODE_NAME_LIST mailbox layout (64-bit DMA address). */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	sp->done = qla24xx_async_gnl_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	/*
	 * NOTE(review): vha->gnl.sent stays set on this failure path —
	 * confirm a later reset clears it, else further GNLs would stall.
	 */
done:
	return rval;
}
  819. int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
  820. {
  821. struct qla_work_evt *e;
  822. e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
  823. if (!e)
  824. return QLA_FUNCTION_FAILED;
  825. e->u.fcport.fcport = fcport;
  826. fcport->flags |= FCF_ASYNC_ACTIVE;
  827. fcport->disc_state = DSC_LOGIN_PEND;
  828. return qla2x00_post_work(vha, e);
  829. }
/*
 * qla24xx_async_gpdb_sp_done() - Completion for a Get Port Database
 * mailbox SRB.
 * @s:   the completed SRB.
 * @res: driver completion status.
 *
 * Forwards FCME_GPDB_DONE to the fcport event handler (skipped on
 * timeout), then returns the port-database DMA buffer to the pool and
 * frees the SRB.
 */
static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (res == QLA_FUNCTION_TIMEOUT)
		goto done;

	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_GPDB_DONE;
	ea.fcport = fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

done:
	/* The pd buffer was allocated from s_dma_pool at submit time. */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);
	sp->free(sp);
}
  855. static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
  856. {
  857. struct qla_work_evt *e;
  858. e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
  859. if (!e)
  860. return QLA_FUNCTION_FAILED;
  861. e->u.fcport.fcport = fcport;
  862. return qla2x00_post_work(vha, e);
  863. }
/*
 * qla2x00_async_prli_sp_done() - Completion callback for an async PRLI SRB.
 * @ptr: the completed SRB.
 * @res: driver completion status.
 *
 * Forwards the PRLI mailbox/IOP status to the fcport event handler as an
 * FCME_PRLI_DONE event (skipped while unloading) and frees the SRB.
 */
static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PRLI_DONE;
		ea.fcport = sp->fcport;
		/* data[]: mailbox status; iop[]: IOCB status parameters. */
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}
/*
 * Issue an asynchronous PRLI (process login) to @fcport.
 *
 * Bails out if the adapter is offline or if firmware already has a
 * PLOGI/PRLI pending for this port.  On successful submission the
 * result is delivered through qla2x00_async_prli_sp_done(); on a
 * submission failure the port is flagged for relogin and the srb is
 * freed here.
 *
 * Returns QLA_SUCCESS when the srb was started, otherwise an error
 * status (QLA_FUNCTION_FAILED or the qla2x00_start_sp() result).
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		return rval;

	/* Firmware is already mid-login for this port; don't interfere. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	/* Request an NVMe PRLI when the port advertised FC-NVMe support. */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Could not queue the IOCB; let the DPC thread retry. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
  929. int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
  930. {
  931. struct qla_work_evt *e;
  932. e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
  933. if (!e)
  934. return QLA_FUNCTION_FAILED;
  935. e->u.fcport.fcport = fcport;
  936. e->u.fcport.opt = opt;
  937. fcport->flags |= FCF_ASYNC_ACTIVE;
  938. return qla2x00_post_work(vha, e);
  939. }
/*
 * Issue an asynchronous Get Port Database mailbox command for @fcport.
 *
 * Allocates a DMA buffer for the port database from the hw s_dma_pool;
 * the buffer is freed either here on a submission failure or in
 * qla24xx_async_gpdb_sp_done() on completion.  @opt is passed through
 * in mailbox register 10.
 *
 * On any failure before successful submission, a GPDB work item is
 * re-posted so the request is retried via the work queue path.
 *
 * Returns QLA_SUCCESS when the srb was started, otherwise an error
 * status.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	/* Refuse when offline, when an async op is already in flight for
	 * this port, or when no loop id has been assigned yet. */
	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	/* Snapshot generation counters so the completion path can detect
	 * RSCN/login changes that happened while the command was in flight. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	/* MBC_GET_PORT_DATABASE: loop id in mb[1], 64-bit DMA address of
	 * the response buffer split across mb[2,3,6,7], vp index in mb[9],
	 * options in mb[10]. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	/* Retry through the work-queue path rather than failing silently. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
/*
 * Finalize a successful GPDB completion: bump the login generation,
 * mark the session live, and either register the fcport (first
 * successful login) or note that an existing session revalidated.
 *
 * Runs under tgt.sess_lock; the lock is dropped temporarily around
 * qla24xx_sched_upd_fcport() and reacquired afterwards.
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		/* First successful login for this port: count it and
		 * schedule fcport registration outside the lock. */
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
/*
 * FCME_GPDB_DONE handler: inspect the firmware login state returned in
 * the port database and act on it.
 *
 * The current_login_state byte carries the NVMe state in the high
 * nibble and the FCP state in the low nibble.  PRLI-complete ports are
 * parsed and finalized; mid-login states trigger a relogin attempt;
 * anything else schedules the session for deletion.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
	    fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
	    ea->rc);

	/* Port is being torn down; its state must not be advanced. */
	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* High nibble = NVMe login state, low nibble = FCP login state. */
	if (fcport->fc4f_nvme)
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_PENDING:
	case PDS_PLOGI_COMPLETE:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			fcport->disc_state = DSC_GNL;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
  1073. static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
  1074. {
  1075. u8 login = 0;
  1076. int rc;
  1077. if (qla_tgt_mode_enabled(vha))
  1078. return;
  1079. if (qla_dual_mode_enabled(vha)) {
  1080. if (N2N_TOPO(vha->hw)) {
  1081. u64 mywwn, wwn;
  1082. mywwn = wwn_to_u64(vha->port_name);
  1083. wwn = wwn_to_u64(fcport->port_name);
  1084. if (mywwn > wwn)
  1085. login = 1;
  1086. else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
  1087. && time_after_eq(jiffies,
  1088. fcport->plogi_nack_done_deadline))
  1089. login = 1;
  1090. } else {
  1091. login = 1;
  1092. }
  1093. } else {
  1094. /* initiator mode */
  1095. login = 1;
  1096. }
  1097. if (login) {
  1098. if (fcport->loop_id == FC_NO_LOOP_ID) {
  1099. fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
  1100. rc = qla2x00_find_new_loop_id(vha, fcport);
  1101. if (rc) {
  1102. ql_dbg(ql_dbg_disc, vha, 0x20e6,
  1103. "%s %d %8phC post del sess - out of loopid\n",
  1104. __func__, __LINE__, fcport->port_name);
  1105. fcport->scan_state = 0;
  1106. qlt_schedule_sess_for_deletion(fcport);
  1107. return;
  1108. }
  1109. }
  1110. ql_dbg(ql_dbg_disc, vha, 0x20bf,
  1111. "%s %d %8phC post login\n",
  1112. __func__, __LINE__, fcport->port_name);
  1113. qla2x00_post_async_login_work(vha, fcport, NULL);
  1114. }
  1115. }
/*
 * Discovery/login state machine for a single fcport.
 *
 * Called from the relogin/DPC path; looks at the port's discovery
 * state and posts the next asynchronous step (GNNID/GNL/GPDB/PRLI/
 * ADISC/PLOGI) as work items.  Always returns 0; progress is reported
 * via posted work and dpc flag bits rather than the return value.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->login_retry,
	    fcport->loop_id, fcport->scan_state);

	/* Only ports seen by the last fabric/loop scan are pursued. */
	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* Firmware is already mid-login for this port; don't interfere. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	/* Remote PLOGI completed but its NACK deadline has not passed yet;
	 * defer and let DPC retry. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	/* An async command is already outstanding for this port. */
	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* N2N: the smaller-WWPN side drives ELS PLOGI,
			 * the bigger side queries the firmware node list. */
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				/* Node name unknown: query it first. */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* NOTE(review): 0x6 appears to be the PRLI-complete
			 * login state nibble -- confirm against firmware spec. */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			}  else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post NVMe PRLI\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				/* A conflicting session must clean up first;
				 * remember current generations and retry later. */
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		fcport->login_retry--;
		ql_dbg(ql_dbg_disc, vha, 0x20d0,
		    "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qla24xx_post_gidpn_work(vha, fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		ql_dbg(ql_dbg_disc, vha, 0x20d1,
		    "%s %d %8phC post adisc\n",
		    __func__, __LINE__, fcport->port_name);
		fcport->login_retry--;
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	case DSC_UPD_FCPORT:
		/* Registration in progress; warn once per minute if it is
		 * taking unusually long, then schedule a recheck. */
		sec =  jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}
  1259. static
  1260. void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
  1261. {
  1262. fcport->rscn_gen++;
  1263. ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
  1264. "%s %8phC DS %d LS %d\n",
  1265. __func__, fcport->port_name, fcport->disc_state,
  1266. fcport->fw_login_state);
  1267. if (fcport->flags & FCF_ASYNC_SENT)
  1268. return;
  1269. switch (fcport->disc_state) {
  1270. case DSC_DELETED:
  1271. case DSC_LOGIN_COMPLETE:
  1272. qla24xx_post_gpnid_work(fcport->vha, &ea->id);
  1273. break;
  1274. default:
  1275. break;
  1276. }
  1277. }
  1278. int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
  1279. u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
  1280. {
  1281. struct qla_work_evt *e;
  1282. e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
  1283. if (!e)
  1284. return QLA_FUNCTION_FAILED;
  1285. e->u.new_sess.id = *id;
  1286. e->u.new_sess.pla = pla;
  1287. e->u.new_sess.fc4_type = fc4_type;
  1288. memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
  1289. if (node_name)
  1290. memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
  1291. return qla2x00_post_work(vha, e);
  1292. }
/*
 * FCME_RELOGIN handler: decide whether @fcport can (re)enter the
 * login state machine now.
 *
 * Defers while firmware has a PLOGI/PRLI pending or the remote's
 * PLOGI NACK deadline has not passed; if an RSCN arrived since the
 * last attempt, revalidates the port id via GIDPN first.  Otherwise
 * falls through to qla24xx_fcport_handle_login().
 */
static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	/* Firmware is already mid-login for this port; don't interfere. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	/* Remote PLOGI completed but its NACK deadline has not passed;
	 * let DPC retry later. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return;
		}
	}

	/* RSCN seen since the last pass: re-resolve the port id first. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}
  1323. void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
  1324. {
  1325. ql_dbg(ql_dbg_disc, vha, 0x2118,
  1326. "%s %d %8phC post PRLI\n",
  1327. __func__, __LINE__, ea->fcport->port_name);
  1328. qla24xx_post_prli_work(vha, ea->fcport);
  1329. }
  1330. void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
  1331. {
  1332. fc_port_t *f, *tf;
  1333. uint32_t id = 0, mask, rid;
  1334. unsigned long flags;
  1335. fc_port_t *fcport;
  1336. switch (ea->event) {
  1337. case FCME_RELOGIN:
  1338. if (test_bit(UNLOADING, &vha->dpc_flags))
  1339. return;
  1340. qla24xx_handle_relogin_event(vha, ea);
  1341. break;
  1342. case FCME_RSCN:
  1343. if (test_bit(UNLOADING, &vha->dpc_flags))
  1344. return;
  1345. switch (ea->id.b.rsvd_1) {
  1346. case RSCN_PORT_ADDR:
  1347. fcport = qla2x00_find_fcport_by_nportid
  1348. (vha, &ea->id, 1);
  1349. if (fcport) {
  1350. fcport->scan_needed = 1;
  1351. fcport->rscn_gen++;
  1352. }
  1353. spin_lock_irqsave(&vha->work_lock, flags);
  1354. if (vha->scan.scan_flags == 0) {
  1355. ql_dbg(ql_dbg_disc, vha, 0xffff,
  1356. "%s: schedule\n", __func__);
  1357. vha->scan.scan_flags |= SF_QUEUED;
  1358. schedule_delayed_work(&vha->scan.scan_work, 5);
  1359. }
  1360. spin_unlock_irqrestore(&vha->work_lock, flags);
  1361. break;
  1362. case RSCN_AREA_ADDR:
  1363. case RSCN_DOM_ADDR:
  1364. if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
  1365. mask = 0xffff00;
  1366. ql_dbg(ql_dbg_async, vha, 0x5044,
  1367. "RSCN: Area 0x%06x was affected\n",
  1368. ea->id.b24);
  1369. } else {
  1370. mask = 0xff0000;
  1371. ql_dbg(ql_dbg_async, vha, 0x507a,
  1372. "RSCN: Domain 0x%06x was affected\n",
  1373. ea->id.b24);
  1374. }
  1375. rid = ea->id.b24 & mask;
  1376. list_for_each_entry_safe(f, tf, &vha->vp_fcports,
  1377. list) {
  1378. id = f->d_id.b24 & mask;
  1379. if (rid == id) {
  1380. ea->fcport = f;
  1381. qla24xx_handle_rscn_event(f, ea);
  1382. }
  1383. }
  1384. break;
  1385. case RSCN_FAB_ADDR:
  1386. default:
  1387. ql_log(ql_log_warn, vha, 0xd045,
  1388. "RSCN: Fabric was affected. Addr format %d\n",
  1389. ea->id.b.rsvd_1);
  1390. qla2x00_mark_all_devices_lost(vha, 1);
  1391. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  1392. set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
  1393. }
  1394. break;
  1395. case FCME_GIDPN_DONE:
  1396. qla24xx_handle_gidpn_event(vha, ea);
  1397. break;
  1398. case FCME_GNL_DONE:
  1399. qla24xx_handle_gnl_done_event(vha, ea);
  1400. break;
  1401. case FCME_GPSC_DONE:
  1402. qla24xx_handle_gpsc_event(vha, ea);
  1403. break;
  1404. case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
  1405. qla24xx_handle_plogi_done_event(vha, ea);
  1406. break;
  1407. case FCME_PRLI_DONE:
  1408. qla24xx_handle_prli_done_event(vha, ea);
  1409. break;
  1410. case FCME_GPDB_DONE:
  1411. qla24xx_handle_gpdb_event(vha, ea);
  1412. break;
  1413. case FCME_GPNID_DONE:
  1414. qla24xx_handle_gpnid_event(vha, ea);
  1415. break;
  1416. case FCME_GFFID_DONE:
  1417. qla24xx_handle_gffid_event(vha, ea);
  1418. break;
  1419. case FCME_ADISC_DONE:
  1420. qla24xx_handle_adisc_event(vha, ea);
  1421. break;
  1422. case FCME_GNNID_DONE:
  1423. qla24xx_handle_gnnid_event(vha, ea);
  1424. break;
  1425. case FCME_GFPNID_DONE:
  1426. qla24xx_handle_gfpnid_event(vha, ea);
  1427. break;
  1428. case FCME_ELS_PLOGI_DONE:
  1429. qla_handle_els_plogi_done(vha, ea);
  1430. break;
  1431. default:
  1432. BUG_ON(1);
  1433. break;
  1434. }
  1435. }
  1436. static void
  1437. qla2x00_tmf_iocb_timeout(void *data)
  1438. {
  1439. srb_t *sp = data;
  1440. struct srb_iocb *tmf = &sp->u.iocb_cmd;
  1441. tmf->u.tmf.comp_status = CS_TIMEOUT;
  1442. complete(&tmf->u.tmf.comp);
  1443. }
  1444. static void
  1445. qla2x00_tmf_sp_done(void *ptr, int res)
  1446. {
  1447. srb_t *sp = ptr;
  1448. struct srb_iocb *tmf = &sp->u.iocb_cmd;
  1449. complete(&tmf->u.tmf.comp);
  1450. }
/*
 * Issue a task-management command (e.g. LUN reset) to @fcport and wait
 * synchronously for its completion.
 *
 * @flags selects the TM function (e.g. TCF_LUN_RESET), @lun the target
 * LUN and @tag the command tag.  After a successful TM a marker IOCB
 * is issued (except during unload and on ISPFX00).  The srb is freed
 * here on every path after submission.
 *
 * Returns QLA_SUCCESS on success, otherwise a failure status.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";

	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;

	/* NOTE(review): sp->handle is logged before qla2x00_start_sp()
	 * runs -- the value may not be assigned yet; confirm. */
	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	/* Completion is signalled by qla2x00_tmf_sp_done() or, on
	 * timeout, by qla2x00_tmf_iocb_timeout(). */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->req_q_map[0],
		    vha->hw->rsp_q_map[0], fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
  1499. static void
  1500. qla24xx_abort_iocb_timeout(void *data)
  1501. {
  1502. srb_t *sp = data;
  1503. struct srb_iocb *abt = &sp->u.iocb_cmd;
  1504. abt->u.abt.comp_status = CS_TIMEOUT;
  1505. sp->done(sp, QLA_FUNCTION_TIMEOUT);
  1506. }
/*
 * Completion callback for an abort srb.
 *
 * Only acts when it successfully cancels the srb timer: that makes
 * this path the sole owner of the srb and avoids racing with
 * qla24xx_abort_iocb_timeout().  Waited-on aborts are completed so
 * the submitter can free the srb; fire-and-forget aborts are freed
 * here directly.
 */
static void
qla24xx_abort_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	if (del_timer(&sp->u.iocb_cmd.timer)) {
		if (sp->flags & SRB_WAKEUP_ON_COMP)
			complete(&abt->u.abt.comp);
		else
			sp->free(sp);
	}
}
/*
 * Issue an abort IOCB for the in-flight command @cmd_sp.
 *
 * When @wait is true the caller blocks until the abort completes and
 * the srb is freed here; when false the function returns immediately
 * after submission and qla24xx_abort_sp_done() frees the srb.
 *
 * Returns QLA_SUCCESS when the abort completed (or, for !wait, was
 * successfully submitted), otherwise QLA_FUNCTION_FAILED.
 */
int
qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* Allocate the abort srb on the same qpair as the victim command. */
	sp = qla2xxx_get_qpair_sp(cmd_sp->qpair, cmd_sp->fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	sp->qpair = cmd_sp->qpair;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);
	/* FW can send 2 x ABTS's timeout/20s */
	qla2x00_init_timer(sp, 42);

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	sp->done = qla24xx_abort_sp_done;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, type=%x\n",
	    cmd_sp->handle, cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	if (wait) {
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
			QLA_SUCCESS : QLA_FUNCTION_FAILED;
	} else {
		/* Fire-and-forget: the completion path owns the srb. */
		goto done;
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
  1560. int
  1561. qla24xx_async_abort_command(srb_t *sp)
  1562. {
  1563. unsigned long flags = 0;
  1564. uint32_t handle;
  1565. fc_port_t *fcport = sp->fcport;
  1566. struct scsi_qla_host *vha = fcport->vha;
  1567. struct qla_hw_data *ha = vha->hw;
  1568. struct req_que *req = vha->req;
  1569. if (vha->flags.qpairs_available && sp->qpair)
  1570. req = sp->qpair->req;
  1571. spin_lock_irqsave(&ha->hardware_lock, flags);
  1572. for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
  1573. if (req->outstanding_cmds[handle] == sp)
  1574. break;
  1575. }
  1576. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1577. if (handle == req->num_outstanding_cmds) {
  1578. /* Command not found. */
  1579. return QLA_FUNCTION_FAILED;
  1580. }
  1581. if (sp->type == SRB_FXIOCB_DCMD)
  1582. return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
  1583. FXDISC_ABORT_IOCTL);
  1584. return qla24xx_async_abort_cmd(sp, true);
  1585. }
/*
 * FCME_PRLI_DONE handler.
 *
 * On success, queues a GPDB to confirm the firmware login state.  On
 * failure: an ELS reject with reason 5 (busy) schedules a relogin; an
 * N2N port that failed an NVMe PRLI is retried as plain FC PRLI.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
		    (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			break;
		}

		/* NVMe PRLI failed on an N2N port: downgrade to FC and
		 * retry. */
		if (ea->fcport->n2n_flag) {
			ql_dbg(ql_dbg_disc, vha, 0x2118,
				"%s %d %8phC post fc4 prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			ea->fcport->fc4f_nvme = 0;
			ea->fcport->n2n_flag = 0;
			qla24xx_post_prli_work(vha, ea->fcport);
		}
		/* NOTE(review): the n2n retry path above also falls
		 * through to this "unhandle event" message -- confirm
		 * whether a break was intended. */
		ql_dbg(ql_dbg_disc, vha, 0x2119,
		    "%s %d %8phC unhandle event of %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
		break;
	}
}
  1619. static void
  1620. qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
  1621. {
  1622. port_id_t cid; /* conflict Nport id */
  1623. u16 lid;
  1624. struct fc_port *conflict_fcport;
  1625. unsigned long flags;
  1626. struct fc_port *fcport = ea->fcport;
  1627. ql_dbg(ql_dbg_disc, vha, 0xffff,
  1628. "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
  1629. __func__, fcport->port_name, fcport->disc_state,
  1630. fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
  1631. ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
  1632. ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
  1633. if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
  1634. (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
  1635. ql_dbg(ql_dbg_disc, vha, 0x20ea,
  1636. "%s %d %8phC Remote is trying to login\n",
  1637. __func__, __LINE__, fcport->port_name);
  1638. return;
  1639. }
  1640. if ((fcport->disc_state == DSC_DELETE_PEND) ||
  1641. (fcport->disc_state == DSC_DELETED)) {
  1642. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  1643. return;
  1644. }
  1645. if (ea->sp->gen2 != fcport->login_gen) {
  1646. /* target side must have changed it. */
  1647. ql_dbg(ql_dbg_disc, vha, 0x20d3,
  1648. "%s %8phC generation changed\n",
  1649. __func__, fcport->port_name);
  1650. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  1651. return;
  1652. } else if (ea->sp->gen1 != fcport->rscn_gen) {
  1653. ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
  1654. __func__, __LINE__, fcport->port_name);
  1655. qla24xx_post_gidpn_work(vha, fcport);
  1656. return;
  1657. }
  1658. switch (ea->data[0]) {
  1659. case MBS_COMMAND_COMPLETE:
  1660. /*
  1661. * Driver must validate login state - If PRLI not complete,
  1662. * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
  1663. * requests.
  1664. */
  1665. if (ea->fcport->fc4f_nvme) {
  1666. ql_dbg(ql_dbg_disc, vha, 0x2117,
  1667. "%s %d %8phC post prli\n",
  1668. __func__, __LINE__, ea->fcport->port_name);
  1669. qla24xx_post_prli_work(vha, ea->fcport);
  1670. } else {
  1671. ql_dbg(ql_dbg_disc, vha, 0x20ea,
  1672. "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
  1673. __func__, __LINE__, ea->fcport->port_name,
  1674. ea->fcport->loop_id, ea->fcport->d_id.b24);
  1675. set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
  1676. spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
  1677. ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
  1678. ea->fcport->logout_on_delete = 1;
  1679. ea->fcport->send_els_logo = 0;
  1680. ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
  1681. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  1682. qla24xx_post_gpdb_work(vha, ea->fcport, 0);
  1683. }
  1684. break;
  1685. case MBS_COMMAND_ERROR:
  1686. ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
  1687. __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
  1688. ea->fcport->flags &= ~FCF_ASYNC_SENT;
  1689. ea->fcport->disc_state = DSC_LOGIN_FAILED;
  1690. if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
  1691. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  1692. else
  1693. qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
  1694. break;
  1695. case MBS_LOOP_ID_USED:
  1696. /* data[1] = IO PARAM 1 = nport ID */
  1697. cid.b.domain = (ea->iop[1] >> 16) & 0xff;
  1698. cid.b.area = (ea->iop[1] >> 8) & 0xff;
  1699. cid.b.al_pa = ea->iop[1] & 0xff;
  1700. cid.b.rsvd_1 = 0;
  1701. ql_dbg(ql_dbg_disc, vha, 0x20ec,
  1702. "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
  1703. __func__, __LINE__, ea->fcport->port_name,
  1704. ea->fcport->loop_id, cid.b24);
  1705. set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
  1706. ea->fcport->loop_id = FC_NO_LOOP_ID;
  1707. qla24xx_post_gnl_work(vha, ea->fcport);
  1708. break;
  1709. case MBS_PORT_ID_USED:
  1710. lid = ea->iop[1] & 0xffff;
  1711. qlt_find_sess_invalidate_other(vha,
  1712. wwn_to_u64(ea->fcport->port_name),
  1713. ea->fcport->d_id, lid, &conflict_fcport);
  1714. if (conflict_fcport) {
  1715. /*
  1716. * Another fcport share the same loop_id/nport id.
  1717. * Conflict fcport needs to finish cleanup before this
  1718. * fcport can proceed to login.
  1719. */
  1720. conflict_fcport->conflict = ea->fcport;
  1721. ea->fcport->login_pause = 1;
  1722. ql_dbg(ql_dbg_disc, vha, 0x20ed,
  1723. "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
  1724. __func__, __LINE__, ea->fcport->port_name,
  1725. ea->fcport->d_id.b24, lid);
  1726. qla2x00_clear_loop_id(ea->fcport);
  1727. qla24xx_post_gidpn_work(vha, ea->fcport);
  1728. } else {
  1729. ql_dbg(ql_dbg_disc, vha, 0x20ed,
  1730. "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
  1731. __func__, __LINE__, ea->fcport->port_name,
  1732. ea->fcport->d_id.b24, lid);
  1733. qla2x00_clear_loop_id(ea->fcport);
  1734. set_bit(lid, vha->hw->loop_id_map);
  1735. ea->fcport->loop_id = lid;
  1736. ea->fcport->keep_nport_handle = 0;
  1737. qlt_schedule_sess_for_deletion(ea->fcport);
  1738. }
  1739. break;
  1740. }
  1741. return;
  1742. }
  1743. void
  1744. qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
  1745. uint16_t *data)
  1746. {
  1747. qlt_logo_completion_handler(fcport, data[0]);
  1748. fcport->login_gen++;
  1749. fcport->flags &= ~FCF_ASYNC_ACTIVE;
  1750. return;
  1751. }
  1752. void
  1753. qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
  1754. uint16_t *data)
  1755. {
  1756. fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
  1757. if (data[0] == MBS_COMMAND_COMPLETE) {
  1758. qla2x00_update_fcport(vha, fcport);
  1759. return;
  1760. }
  1761. /* Retry login. */
  1762. if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
  1763. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  1764. else
  1765. qla2x00_mark_device_lost(vha, fcport, 1, 0);
  1766. return;
  1767. }
  1768. /****************************************************************************/
  1769. /* QLogic ISP2x00 Hardware Support Functions. */
  1770. /****************************************************************************/
/*
 * qla83xx_nic_core_fw_load() - Participate in ISP83xx NIC Core firmware
 * bring-up via the IDC (Inter-Driver Communication) registers.
 *
 * Runs entirely under the IDC lock: announces this function's presence,
 * decides reset ownership, publishes/validates the IDC major and minor
 * versions, and drives the IDC state machine to readiness.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	/* Two bits per PCI function within the shared minor-version reg. */
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		/* A readable port config implies the core is up: mark READY. */
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
  1833. /*
  1834. * qla2x00_initialize_adapter
  1835. * Initialize board.
  1836. *
  1837. * Input:
  1838. * ha = adapter block pointer.
  1839. *
  1840. * Returns:
  1841. * 0 = success
  1842. */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Reset all driver-side statistics for a fresh bring-up. */
	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	/* Queue 0 (the default request/response queue pair) is always used. */
	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* Only load firmware when a valid image is not already running. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/* Rings are only initialized when initiator-side is active. */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}
  1947. /**
  1948. * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
  1949. * @vha: HA context
  1950. *
  1951. * Returns 0 on success.
  1952. */
  1953. int
  1954. qla2100_pci_config(scsi_qla_host_t *vha)
  1955. {
  1956. uint16_t w;
  1957. unsigned long flags;
  1958. struct qla_hw_data *ha = vha->hw;
  1959. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  1960. pci_set_master(ha->pdev);
  1961. pci_try_set_mwi(ha->pdev);
  1962. pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
  1963. w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
  1964. pci_write_config_word(ha->pdev, PCI_COMMAND, w);
  1965. pci_disable_rom(ha->pdev);
  1966. /* Get PCI bus information. */
  1967. spin_lock_irqsave(&ha->hardware_lock, flags);
  1968. ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
  1969. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1970. return QLA_SUCCESS;
  1971. }
  1972. /**
  1973. * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
  1974. * @vha: HA context
  1975. *
  1976. * Returns 0 on success.
  1977. */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	/* 2322/6322 parts must keep INTx enabled. */
	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);	/* read-back flushes write */

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* FPM rev 6 => true 2300: MWI must stay off (chip bug). */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);	/* read-back flushes write */

		/* Release RISC module. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
  2036. /**
  2037. * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
  2038. * @vha: HA context
  2039. *
  2040. * Returns 0 on success.
  2041. */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting; make sure INTx is not disabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
  2070. /**
  2071. * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
  2072. * @vha: HA context
  2073. *
  2074. * Returns 0 on success.
  2075. */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting; make sure INTx is not disabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
  2094. /**
  2095. * qla2x00_isp_firmware() - Choose firmware image.
  2096. * @vha: HA context
  2097. *
  2098. * Returns 0 on success.
  2099. */
  2100. static int
  2101. qla2x00_isp_firmware(scsi_qla_host_t *vha)
  2102. {
  2103. int rval;
  2104. uint16_t loop_id, topo, sw_cap;
  2105. uint8_t domain, area, al_pa;
  2106. struct qla_hw_data *ha = vha->hw;
  2107. /* Assume loading risc code */
  2108. rval = QLA_FUNCTION_FAILED;
  2109. if (ha->flags.disable_risc_code_load) {
  2110. ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
  2111. /* Verify checksum of loaded RISC code. */
  2112. rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
  2113. if (rval == QLA_SUCCESS) {
  2114. /* And, verify we are not in ROM code. */
  2115. rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
  2116. &area, &domain, &topo, &sw_cap);
  2117. }
  2118. }
  2119. if (rval)
  2120. ql_dbg(ql_dbg_init, vha, 0x007a,
  2121. "**** Load RISC code ****.\n");
  2122. return (rval);
  2123. }
  2124. /**
  2125. * qla2x00_reset_chip() - Reset ISP chip.
  2126. * @vha: HA context
  2127. *
  2128. * Returns 0 on success.
  2129. */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;

	/* Nothing to do if the PCI channel is gone. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	/* Entire reset sequence runs under the hardware lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for the mailbox to leave the BUSY state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;
			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
  2242. /**
  2243. * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
  2244. * @vha: HA context
  2245. *
  2246. * Returns 0 on success.
  2247. */
  2248. static int
  2249. qla81xx_reset_mpi(scsi_qla_host_t *vha)
  2250. {
  2251. uint16_t mb[4] = {0x1010, 0, 1, 0};
  2252. if (!IS_QLA81XX(vha->hw))
  2253. return QLA_SUCCESS;
  2254. return qla81xx_write_mpi_register(vha, mb);
  2255. }
  2256. /**
  2257. * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
  2258. * @vha: HA context
  2259. *
  2260. * Returns 0 on success.
  2261. */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Wait for any in-flight DMA to drain before the soft reset. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* wd is discarded; config read flushes the preceding write --
	 * NOTE(review): presumed posting flush, confirm. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				/* Re-arm both flags so the DPC retries. */
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	/* Toggle the RISC out of reset: set, release pause, clear reset. */
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for mailbox0 to clear, signalling the RISC is ready. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
/* Read the RISC semaphore register through the indirect I/O window. */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	/* Point the window at the RISC register bank first, then read. */
	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
}
/* Write the RISC semaphore register through the indirect I/O window. */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	/* Point the window at the RISC register bank first, then write. */
	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
}
/*
 * qla25xx_manipulate_risc_semaphore() - Acquire (or force) the RISC
 * semaphore before a reset, on the specific subsystem IDs that need it.
 *
 * Retries SET/read cycles until the semaphore sticks; if a stale FORCE
 * bit is seen, clears it and starts over. On overall timeout, falls back
 * to force-setting the semaphore.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	/* Only subsystem IDs 0x0175 and 0x0240 require this workaround. */
	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	/* Keep setting the semaphore until a read-back shows it held. */
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* A stale FORCE bit is set: clear it, wait it out, retry. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	/* Could not acquire cleanly within the budget: force it. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
  2431. /**
  2432. * qla24xx_reset_chip() - Reset ISP24xx chip.
  2433. * @vha: HA context
  2434. *
  2435. * Returns 0 on success.
  2436. */
  2437. void
  2438. qla24xx_reset_chip(scsi_qla_host_t *vha)
  2439. {
  2440. struct qla_hw_data *ha = vha->hw;
  2441. if (pci_channel_offline(ha->pdev) &&
  2442. ha->flags.pci_channel_io_perm_failure) {
  2443. return;
  2444. }
  2445. ha->isp_ops->disable_intrs(ha);
  2446. qla25xx_manipulate_risc_semaphore(vha);
  2447. /* Perform RISC reset. */
  2448. qla24xx_reset_risc(vha);
  2449. }
  2450. /**
  2451. * qla2x00_chip_diag() - Test chip for proper operation.
  2452. * @vha: HA context
  2453. *
  2454. * Returns 0 on success.
  2455. */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	/* Poll until the soft-reset bit clears (or the budget expires). */
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	/* Mailbox test issues a command, so drop the hardware lock here. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	/* Re-take the lock so the failure path's unlock below balances. */
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
  2552. /**
  2553. * qla24xx_chip_diag() - Test ISP24xx for proper operation.
  2554. * @vha: HA context
  2555. *
  2556. * Returns 0 on success.
  2557. */
  2558. int
  2559. qla24xx_chip_diag(scsi_qla_host_t *vha)
  2560. {
  2561. int rval;
  2562. struct qla_hw_data *ha = vha->hw;
  2563. struct req_que *req = ha->req_q_map[0];
  2564. if (IS_P3P_TYPE(ha))
  2565. return QLA_SUCCESS;
  2566. ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
  2567. rval = qla2x00_mbx_reg_test(vha);
  2568. if (rval) {
  2569. ql_log(ql_log_warn, vha, 0x0082,
  2570. "Failed mailbox send register test.\n");
  2571. } else {
  2572. /* Flag a successful rval */
  2573. rval = QLA_SUCCESS;
  2574. }
  2575. return rval;
  2576. }
/*
 * qla2x00_alloc_offload_mem() - Allocate and enable the FCE (Fibre
 * Channel Event) and EFT (Extended Firmware Trace) DMA buffers.
 *
 * Best-effort: each allocation/enable failure is logged and falls
 * through to the next stage; ha->eft non-NULL means both were done.
 */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	int rval;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	/* ha->eft set means a previous call already finished; bail out. */
	if (ha->eft) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: Offload Mem is already allocated.\n",
		    __func__);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		/* Allocate memory for Fibre Channel Event Buffer. */
		/* Only 25xx/81xx/83xx/27xx parts support FCE. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha))
			goto try_eft;

		/* Drop any stale FCE buffer before reallocating. */
		if (ha->fce)
			dma_free_coherent(&ha->pdev->dev,
			    FCE_SIZE, ha->fce, ha->fce_dma);

		/* Allocate memory for Fibre Channel Event Buffer. */
		tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00be,
			    "Unable to allocate (%d KB) for FCE.\n",
			    FCE_SIZE / 1024);
			goto try_eft;
		}

		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00bf,
			    "Unable to initialize FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c0,
		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);

		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;

try_eft:
		/* Drop any stale EFT buffer before reallocating. */
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
			    EFT_SIZE, ha->eft, ha->eft_dma);

		/* Allocate memory for Extended Trace Buffer. */
		tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00c1,
			    "Unable to allocate (%d KB) for EFT.\n",
			    EFT_SIZE / 1024);
			goto eft_err;
		}

		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00c2,
			    "Unable to initialize EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto eft_err;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

		ha->eft_dma = tc_dma;
		ha->eft = tc;
	}

eft_err:
	return;
}
  2651. void
  2652. qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
  2653. {
  2654. uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
  2655. eft_size, fce_size, mq_size;
  2656. struct qla_hw_data *ha = vha->hw;
  2657. struct req_que *req = ha->req_q_map[0];
  2658. struct rsp_que *rsp = ha->rsp_q_map[0];
  2659. struct qla2xxx_fw_dump *fw_dump;
  2660. dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
  2661. req_q_size = rsp_q_size = 0;
  2662. if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
  2663. fixed_size = sizeof(struct qla2100_fw_dump);
  2664. } else if (IS_QLA23XX(ha)) {
  2665. fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
  2666. mem_size = (ha->fw_memory_size - 0x11000 + 1) *
  2667. sizeof(uint16_t);
  2668. } else if (IS_FWI2_CAPABLE(ha)) {
  2669. if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
  2670. fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
  2671. else if (IS_QLA81XX(ha))
  2672. fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
  2673. else if (IS_QLA25XX(ha))
  2674. fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
  2675. else
  2676. fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
  2677. mem_size = (ha->fw_memory_size - 0x100000 + 1) *
  2678. sizeof(uint32_t);
  2679. if (ha->mqenable) {
  2680. if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
  2681. mq_size = sizeof(struct qla2xxx_mq_chain);
  2682. /*
  2683. * Allocate maximum buffer size for all queues.
  2684. * Resizing must be done at end-of-dump processing.
  2685. */
  2686. mq_size += ha->max_req_queues *
  2687. (req->length * sizeof(request_t));
  2688. mq_size += ha->max_rsp_queues *
  2689. (rsp->length * sizeof(response_t));
  2690. }
  2691. if (ha->tgt.atio_ring)
  2692. mq_size += ha->tgt.atio_q_length * sizeof(request_t);
  2693. /* Allocate memory for Fibre Channel Event Buffer. */
  2694. if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
  2695. !IS_QLA27XX(ha))
  2696. goto try_eft;
  2697. fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
  2698. try_eft:
  2699. ql_dbg(ql_dbg_init, vha, 0x00c3,
  2700. "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
  2701. eft_size = EFT_SIZE;
  2702. }
  2703. if (IS_QLA27XX(ha)) {
  2704. if (!ha->fw_dump_template) {
  2705. ql_log(ql_log_warn, vha, 0x00ba,
  2706. "Failed missing fwdump template\n");
  2707. return;
  2708. }
  2709. dump_size = qla27xx_fwdt_calculate_dump_size(vha);
  2710. ql_dbg(ql_dbg_init, vha, 0x00fa,
  2711. "-> allocating fwdump (%x bytes)...\n", dump_size);
  2712. goto allocate;
  2713. }
  2714. req_q_size = req->length * sizeof(request_t);
  2715. rsp_q_size = rsp->length * sizeof(response_t);
  2716. dump_size = offsetof(struct qla2xxx_fw_dump, isp);
  2717. dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
  2718. ha->chain_offset = dump_size;
  2719. dump_size += mq_size + fce_size;
  2720. if (ha->exchoffld_buf)
  2721. dump_size += sizeof(struct qla2xxx_offld_chain) +
  2722. ha->exchoffld_size;
  2723. if (ha->exlogin_buf)
  2724. dump_size += sizeof(struct qla2xxx_offld_chain) +
  2725. ha->exlogin_size;
  2726. allocate:
  2727. if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
  2728. fw_dump = vmalloc(dump_size);
  2729. if (!fw_dump) {
  2730. ql_log(ql_log_warn, vha, 0x00c4,
  2731. "Unable to allocate (%d KB) for firmware dump.\n",
  2732. dump_size / 1024);
  2733. } else {
  2734. if (ha->fw_dump)
  2735. vfree(ha->fw_dump);
  2736. ha->fw_dump = fw_dump;
  2737. ha->fw_dump_len = dump_size;
  2738. ql_dbg(ql_dbg_init, vha, 0x00c5,
  2739. "Allocated (%d KB) for firmware dump.\n",
  2740. dump_size / 1024);
  2741. if (IS_QLA27XX(ha))
  2742. return;
  2743. ha->fw_dump->signature[0] = 'Q';
  2744. ha->fw_dump->signature[1] = 'L';
  2745. ha->fw_dump->signature[2] = 'G';
  2746. ha->fw_dump->signature[3] = 'C';
  2747. ha->fw_dump->version = htonl(1);
  2748. ha->fw_dump->fixed_size = htonl(fixed_size);
  2749. ha->fw_dump->mem_size = htonl(mem_size);
  2750. ha->fw_dump->req_q_size = htonl(req_q_size);
  2751. ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
  2752. ha->fw_dump->eft_size = htonl(eft_size);
  2753. ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
  2754. ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
  2755. ha->fw_dump->header_size =
  2756. htonl(offsetof(struct qla2xxx_fw_dump, isp));
  2757. }
  2758. }
  2759. }
/*
 * qla81xx_mpi_sync() - Mirror the MPS bits (mask 0xe0) from PCI config
 * space offset 0x54 into RISC RAM word 0x7a15 on ISP81xx adapters, under
 * the semaphore at RISC RAM word 0x7c00.
 *
 * Returns QLA_SUCCESS or the status of the last RAM-word access.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0	/* NOTE(review): never #undef'd; leaks past this fn */
	int rval;
	uint16_t dc;
	uint32_t dw;

	/* Only ISP81xx needs this synchronization. */
	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the semaphore (write 1 to RAM word 0x7c00). */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	/* Fetch both copies of the MPS field. */
	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		/* Already in sync -- nothing to write back. */
		goto done_release;

	/* Splice the config-space MPS bits into the RISC word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	/*
	 * NOTE(review): rval is intentionally(?) overwritten here, so an
	 * earlier read/gain-sync failure is masked when the semaphore
	 * release succeeds -- confirm this is the desired contract.
	 */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}
  2799. int
  2800. qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
  2801. {
  2802. /* Don't try to reallocate the array */
  2803. if (req->outstanding_cmds)
  2804. return QLA_SUCCESS;
  2805. if (!IS_FWI2_CAPABLE(ha))
  2806. req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
  2807. else {
  2808. if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
  2809. req->num_outstanding_cmds = ha->cur_fw_xcb_count;
  2810. else
  2811. req->num_outstanding_cmds = ha->cur_fw_iocb_count;
  2812. }
  2813. req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
  2814. sizeof(srb_t *),
  2815. GFP_KERNEL);
  2816. if (!req->outstanding_cmds) {
  2817. /*
  2818. * Try to allocate a minimal size just so we can get through
  2819. * initialization.
  2820. */
  2821. req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
  2822. req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
  2823. sizeof(srb_t *),
  2824. GFP_KERNEL);
  2825. if (!req->outstanding_cmds) {
  2826. ql_log(ql_log_fatal, NULL, 0x0126,
  2827. "Failed to allocate memory for "
  2828. "outstanding_cmds for req_que %p.\n", req);
  2829. req->num_outstanding_cmds = 0;
  2830. return QLA_FUNCTION_FAILED;
  2831. }
  2832. }
  2833. return QLA_SUCCESS;
  2834. }
  2835. #define PRINT_FIELD(_field, _flag, _str) { \
  2836. if (a0->_field & _flag) {\
  2837. if (p) {\
  2838. strcat(ptr, "|");\
  2839. ptr++;\
  2840. leftover--;\
  2841. } \
  2842. len = snprintf(ptr, leftover, "%s", _str); \
  2843. p = 1;\
  2844. leftover -= len;\
  2845. ptr += len; \
  2846. } \
  2847. }
  2848. static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
  2849. {
  2850. #define STR_LEN 64
  2851. struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
  2852. u8 str[STR_LEN], *ptr, p;
  2853. int leftover, len;
  2854. memset(str, 0, STR_LEN);
  2855. snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
  2856. ql_dbg(ql_dbg_init, vha, 0x015a,
  2857. "SFP MFG Name: %s\n", str);
  2858. memset(str, 0, STR_LEN);
  2859. snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
  2860. ql_dbg(ql_dbg_init, vha, 0x015c,
  2861. "SFP Part Name: %s\n", str);
  2862. /* media */
  2863. memset(str, 0, STR_LEN);
  2864. ptr = str;
  2865. leftover = STR_LEN;
  2866. p = len = 0;
  2867. PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
  2868. PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
  2869. PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
  2870. PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
  2871. PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
  2872. PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
  2873. PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
  2874. ql_dbg(ql_dbg_init, vha, 0x0160,
  2875. "SFP Media: %s\n", str);
  2876. /* link length */
  2877. memset(str, 0, STR_LEN);
  2878. ptr = str;
  2879. leftover = STR_LEN;
  2880. p = len = 0;
  2881. PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
  2882. PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
  2883. PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
  2884. PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
  2885. PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
  2886. ql_dbg(ql_dbg_init, vha, 0x0196,
  2887. "SFP Link Length: %s\n", str);
  2888. memset(str, 0, STR_LEN);
  2889. ptr = str;
  2890. leftover = STR_LEN;
  2891. p = len = 0;
  2892. PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
  2893. PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
  2894. PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
  2895. PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
  2896. PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
  2897. ql_dbg(ql_dbg_init, vha, 0x016e,
  2898. "SFP FC Link Tech: %s\n", str);
  2899. if (a0->length_km)
  2900. ql_dbg(ql_dbg_init, vha, 0x016f,
  2901. "SFP Distant: %d km\n", a0->length_km);
  2902. if (a0->length_100m)
  2903. ql_dbg(ql_dbg_init, vha, 0x0170,
  2904. "SFP Distant: %d m\n", a0->length_100m*100);
  2905. if (a0->length_50um_10m)
  2906. ql_dbg(ql_dbg_init, vha, 0x0189,
  2907. "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
  2908. if (a0->length_62um_10m)
  2909. ql_dbg(ql_dbg_init, vha, 0x018a,
  2910. "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
  2911. if (a0->length_om4_10m)
  2912. ql_dbg(ql_dbg_init, vha, 0x0194,
  2913. "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
  2914. if (a0->length_om3_10m)
  2915. ql_dbg(ql_dbg_init, vha, 0x0195,
  2916. "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
  2917. }
  2918. /*
  2919. * Return Code:
  2920. * QLA_SUCCESS: no action
  2921. * QLA_INTERFACE_ERROR: SFP is not there.
  2922. * QLA_FUNCTION_FAILED: detected New SFP
  2923. */
  2924. int
  2925. qla24xx_detect_sfp(scsi_qla_host_t *vha)
  2926. {
  2927. int rc = QLA_SUCCESS;
  2928. struct sff_8247_a0 *a;
  2929. struct qla_hw_data *ha = vha->hw;
  2930. if (!AUTO_DETECT_SFP_SUPPORT(vha))
  2931. goto out;
  2932. rc = qla2x00_read_sfp_dev(vha, NULL, 0);
  2933. if (rc)
  2934. goto out;
  2935. a = (struct sff_8247_a0 *)vha->hw->sfp_data;
  2936. qla2xxx_print_sfp_info(vha);
  2937. if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
  2938. /* long range */
  2939. ha->flags.detected_lr_sfp = 1;
  2940. if (a->length_km > 5 || a->length_100m > 50)
  2941. ha->long_range_distance = LR_DISTANCE_10K;
  2942. else
  2943. ha->long_range_distance = LR_DISTANCE_5K;
  2944. if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
  2945. ql_dbg(ql_dbg_async, vha, 0x507b,
  2946. "Detected Long Range SFP.\n");
  2947. } else {
  2948. /* short range */
  2949. ha->flags.detected_lr_sfp = 0;
  2950. if (ha->flags.using_lr_setting)
  2951. ql_dbg(ql_dbg_async, vha, 0x5084,
  2952. "Detected Short Range SFP.\n");
  2953. }
  2954. if (!vha->flags.init_done)
  2955. rc = QLA_SUCCESS;
  2956. out:
  2957. return rc;
  2958. }
  2959. /**
  2960. * qla2x00_setup_chip() - Load and start RISC firmware.
  2961. * @vha: HA context
  2962. *
  2963. * Returns 0 on success.
  2964. */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	if (IS_P3P_TYPE(ha)) {
		/*
		 * 82xx (P3P): load the RISC and jump into the common
		 * NPIV/resource path below (label inside the nested block).
		 */
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* ISP81xx-only MPS sync; returns QLA_SUCCESS on other parts. */
	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				qla24xx_detect_sfp(vha);

				/* Size extended-login / exchange-offload
				 * buffers now that firmware is running. */
				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				/* Snapshot before get_fw_version() updates it;
				 * used below to detect the first start
				 * (NOTE(review): presumably 0 until the first
				 * qla2x00_get_fw_version() -- confirm). */
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Force a sane vport count when unset
					 * or not aligned to the fabric
					 * multi-ID granularity. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* Flash-access-control support / sector size discovery. */
	if (IS_QLA27XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);
			/* 83xx/27xx may proceed without FAC support. */
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}
/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Resets the response ring pointers and stamps every ring entry with the
 * RESPONSE_PROCESSED signature.
 */
  3102. void
  3103. qla2x00_init_response_q_entries(struct rsp_que *rsp)
  3104. {
  3105. uint16_t cnt;
  3106. response_t *pkt;
  3107. rsp->ring_ptr = rsp->ring;
  3108. rsp->ring_index = 0;
  3109. rsp->status_srb = NULL;
  3110. pkt = rsp->ring_ptr;
  3111. for (cnt = 0; cnt < rsp->length; cnt++) {
  3112. pkt->signature = RESPONSE_PROCESSED;
  3113. pkt++;
  3114. }
  3115. }
/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	/* Start from the firmware's current option words. */
	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* 2100/2200 have no tunable options below. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)&ha->fw_seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	/* NVRAM bit 2 of options[3] enables custom emphasis/swing. */
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings: pack swing/emphasis/sensitivity into
		 * fw_options[10]. */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* 0 is not a valid rx sensitivity on these parts. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings: same layout in fw_options[11]. */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
  3194. void
  3195. qla24xx_update_fw_options(scsi_qla_host_t *vha)
  3196. {
  3197. int rval;
  3198. struct qla_hw_data *ha = vha->hw;
  3199. if (IS_P3P_TYPE(ha))
  3200. return;
  3201. /* Hold status IOCBs until ABTS response received. */
  3202. if (ql2xfwholdabts)
  3203. ha->fw_options[3] |= BIT_12;
  3204. /* Set Retry FLOGI in case of P2P connection */
  3205. if (ha->operating_mode == P2P) {
  3206. ha->fw_options[2] |= BIT_3;
  3207. ql_dbg(ql_dbg_disc, vha, 0x2101,
  3208. "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
  3209. __func__, ha->fw_options[2]);
  3210. }
  3211. /* Move PUREX, ABTS RX & RIDA to ATIOQ */
  3212. if (ql2xmvasynctoatio &&
  3213. (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
  3214. if (qla_tgt_mode_enabled(vha) ||
  3215. qla_dual_mode_enabled(vha))
  3216. ha->fw_options[2] |= BIT_11;
  3217. else
  3218. ha->fw_options[2] &= ~BIT_11;
  3219. }
  3220. if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
  3221. /*
  3222. * Tell FW to track each exchange to prevent
  3223. * driver from using stale exchange.
  3224. */
  3225. if (qla_tgt_mode_enabled(vha) ||
  3226. qla_dual_mode_enabled(vha))
  3227. ha->fw_options[2] |= BIT_4;
  3228. else
  3229. ha->fw_options[2] &= ~BIT_4;
  3230. /* Reserve 1/2 of emergency exchanges for ELS.*/
  3231. if (qla2xuseresexchforels)
  3232. ha->fw_options[2] |= BIT_8;
  3233. else
  3234. ha->fw_options[2] &= ~BIT_8;
  3235. }
  3236. ql_dbg(ql_dbg_init, vha, 0x00e8,
  3237. "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
  3238. __func__, ha->fw_options[1], ha->fw_options[2],
  3239. ha->fw_options[3], vha->host->active_mode);
  3240. if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
  3241. qla2x00_set_fw_options(vha, ha->fw_options);
  3242. /* Update Serial Link options. */
  3243. if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
  3244. return;
  3245. rval = qla2x00_set_serdes_params(vha,
  3246. le16_to_cpu(ha->fw_seriallink_options24[1]),
  3247. le16_to_cpu(ha->fw_seriallink_options24[2]),
  3248. le16_to_cpu(ha->fw_seriallink_options24[3]));
  3249. if (rval != QLA_SUCCESS) {
  3250. ql_log(ql_log_warn, vha, 0x0104,
  3251. "Unable to update Serial Link options (%x).\n", rval);
  3252. }
  3253. }
  3254. void
  3255. qla2x00_config_rings(struct scsi_qla_host *vha)
  3256. {
  3257. struct qla_hw_data *ha = vha->hw;
  3258. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  3259. struct req_que *req = ha->req_q_map[0];
  3260. struct rsp_que *rsp = ha->rsp_q_map[0];
  3261. /* Setup ring parameters in initialization control block. */
  3262. ha->init_cb->request_q_outpointer = cpu_to_le16(0);
  3263. ha->init_cb->response_q_inpointer = cpu_to_le16(0);
  3264. ha->init_cb->request_q_length = cpu_to_le16(req->length);
  3265. ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
  3266. ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
  3267. ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
  3268. ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
  3269. ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
  3270. WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
  3271. WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
  3272. WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
  3273. WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
  3274. RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
  3275. }
/*
 * qla24xx_config_rings() - Program ring parameters for FWI2 adapters:
 * fill the 24xx init control block with request/response/ATIO ring
 * geometry and DMA addresses, configure MQ/MSI-X related option bits,
 * and zero the queue index registers.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));

	/* Enable shadow registers (BIT_30|BIT_29) where supported. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		/* Multi-queue path: QOS, RID and MSI-X vector setup. */
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			/* Vector 0 is reserved; base queue uses entry 1. */
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
	} else {
		/* Single-queue register layout. */
		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
	}

	/* Target-mode specific ring configuration. */
	qlt_24xx_config_rings(vha);

	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}
  3345. /**
  3346. * qla2x00_init_rings() - Initializes firmware.
  3347. * @vha: HA context
  3348. *
  3349. * Beginning of request ring has initialization control block already built
  3350. * by nvram config routine.
  3351. *
  3352. * Returns 0 on success.
  3353. */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the ring entries. */
		req->out_ptr = (void *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is reserved; start clearing at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt      = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer lives just past the ring entries. */
		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		/* D-Port status is reported back in firmware_options_1. */
		ha->flags.dport_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
	}

	return (rval);
}
/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Polls the firmware state via the get-firmware-state mailbox command
 * until it reports FSTATE_READY or a deadline expires.  Two deadlines
 * run in parallel: @mtime, a short "minimum wait" that applies while the
 * loop is down (cable likely unplugged), and @wtime, the overall wait
 * derived from the retry count and login timeout.
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	/* ISPFX00 adapters have their own ready-wait implementation. */
	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		/* Pre-fill with -1 so unreported state words are obvious. */
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			/* Any state below loss-of-sync means a cable is present. */
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				/* Refresh timing parameters now that fw is up. */
				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
				ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	/* DFLG_NO_CABLE failures were already logged above; skip the warning. */
	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
/*
 * qla2x00_configure_hba
 *	Setup adapter context.
 *
 *	Queries the firmware for the host's loop ID, port ID (AL_PA /
 *	area / domain) and topology, decodes the topology into the
 *	driver's operating mode, and publishes the resulting port ID to
 *	the target-mode host map.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/*
		 * A transitioning loop, a running loop-down timer, CNA
		 * hardware, or command-error status with loop_id 0x7 are
		 * treated as transient; any other failure schedules a
		 * full ISP abort.
		 */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/*
			 * On FWI2 base ports a 0x1b command error gets one
			 * attempt at link (re)initialization before the
			 * heavier ISP abort is requested.
			 */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* Topology 4: firmware could not determine topology yet -- retry. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	/* Decode topology into operating mode, switch caps and description. */
	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	/*
	 * N2N with the bigger-WWN side handles its own address assignment,
	 * so skip the host-map update in that one case.
	 */
	if (!(topo == 2 && ha->flags.n2n_bigger))
		qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}
/**
 * qla2x00_set_model_info() - Derive model number/description strings.
 * @vha: HA context
 * @model: model-number bytes read from NVRAM (may be all zero/blank)
 * @len: number of bytes in @model
 * @def: fallback model-number string used when @model is blank and no
 *	lookup-table entry applies
 *
 * Fills ha->model_number and (when available) ha->model_desc either
 * from the NVRAM bytes, from the qla2x00_model_name table indexed by
 * PCI subsystem device ID, or from @def.  FWI2-capable adapters
 * additionally refine the description from VPD tag 0x82.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	char *def)
{
	char *st, *en;
	uint16_t index;
	struct qla_hw_data *ha = vha->hw;
	/* Only older, non-CNA parts use the static model-name table. */
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (memcmp(model, BINZERO, len) != 0) {
		/*
		 * NOTE(review): strncpy() does not NUL-terminate when @model
		 * fills all @len bytes -- this assumes ha->model_number is
		 * larger than @len and pre-zeroed; confirm against the buffer
		 * size declared in qla_def.h.
		 */
		strncpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		/* Trim trailing spaces and NULs. */
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
	} else {
		/* Blank NVRAM model: take the table entry or the default. */
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strcpy(ha->model_number,
			    qla2x00_model_name[index * 2]);
			strncpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc) - 1);
		} else {
			strcpy(ha->model_number, def);
		}
	}

	/* FWI2-capable parts can refine the description from VPD tag 0x82. */
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}
  3700. /* On sparc systems, obtain port and node WWN from firmware
  3701. * properties.
  3702. */
  3703. static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
  3704. {
  3705. #ifdef CONFIG_SPARC
  3706. struct qla_hw_data *ha = vha->hw;
  3707. struct pci_dev *pdev = ha->pdev;
  3708. struct device_node *dp = pci_device_to_OF_node(pdev);
  3709. const u8 *val;
  3710. int len;
  3711. val = of_get_property(dp, "port-wwn", &len);
  3712. if (val && len >= WWN_SIZE)
  3713. memcpy(nv->port_name, val, WWN_SIZE);
  3714. val = of_get_property(dp, "node-wwn", &len);
  3715. if (val && len >= WWN_SIZE)
  3716. memcpy(nv->node_name, val, WWN_SIZE);
  3717. #endif
  3718. }
/*
 * NVRAM configuration for ISP 2xxx
 *
 *	Reads and checksums the NVRAM, falls back to safe defaults when
 *	the contents are invalid, copies the RISC parameter block into
 *	the initialization control block, and derives the driver's
 *	timeout/retry/ZIO settings from the result.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Output:
 *	initialization control block in response_ring
 *	host adapters parameters in host adapter block
 *
 * Returns:
 *	0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(nvram_t);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		/* ctrl_status bits 15:14 select the per-function NVRAM bank. */
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    (uint8_t *)nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	/* A valid image sums to zero and starts with the "ISP " signature. */
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM "
		    "detected: checksum=0x%x id=%c version=0x%x.\n",
		    chksum, nv->id[0], nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		/* Per-ISP-family default firmware options and frame size. */
		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 2048;
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 1024;
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = 1024;
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder (invalid) WWPN prefix; may be replaced by OFW. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Non-zero rval reports "defaults in use" at the end. */
		rval = 1;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	/*
	 * The SN2 does not provide BIOS emulation which means you can't change
	 * potentially bogus BIOS settings. Force the use of default settings
	 * for link rate and frame size. Hope that the rest of the settings
	 * are valid.
	 */
	if (ia64_platform_is("sn2")) {
		nv->frame_payload_size = 2048;
		if (IS_QLA23XX(ha))
			nv->special_options[1] = BIT_7;
	}
#endif

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			/* Distinguish 2310 from 2300 by feature-bus revision. */
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	 nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		/* Latch the NVRAM-provided ZIO settings only once. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer: 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
  4005. static void
  4006. qla2x00_rport_del(void *data)
  4007. {
  4008. fc_port_t *fcport = data;
  4009. struct fc_rport *rport;
  4010. unsigned long flags;
  4011. spin_lock_irqsave(fcport->vha->host->host_lock, flags);
  4012. rport = fcport->drport ? fcport->drport: fcport->rport;
  4013. fcport->drport = NULL;
  4014. spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
  4015. if (rport) {
  4016. ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
  4017. "%s %8phN. rport %p roles %x\n",
  4018. __func__, fcport->port_name, rport,
  4019. rport->roles);
  4020. fc_remote_port_delete(rport);
  4021. }
  4022. }
  4023. /**
  4024. * qla2x00_alloc_fcport() - Allocate a generic fcport.
  4025. * @vha: HA context
  4026. * @flags: allocation flags
  4027. *
  4028. * Returns a pointer to the allocated fcport, or NULL, if none available.
  4029. */
  4030. fc_port_t *
  4031. qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
  4032. {
  4033. fc_port_t *fcport;
  4034. fcport = kzalloc(sizeof(fc_port_t), flags);
  4035. if (!fcport)
  4036. return NULL;
  4037. /* Setup fcport template structure. */
  4038. fcport->vha = vha;
  4039. fcport->port_type = FCT_UNKNOWN;
  4040. fcport->loop_id = FC_NO_LOOP_ID;
  4041. qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
  4042. fcport->supported_classes = FC_COS_UNSPECIFIED;
  4043. fcport->fp_speed = PORT_SPEED_UNKNOWN;
  4044. fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
  4045. sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
  4046. flags);
  4047. fcport->disc_state = DSC_DELETED;
  4048. fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
  4049. fcport->deleted = QLA_SESS_DELETED;
  4050. fcport->login_retry = vha->hw->login_retry_count;
  4051. fcport->logout_on_delete = 1;
  4052. if (!fcport->ct_desc.ct_sns) {
  4053. ql_log(ql_log_warn, vha, 0xd049,
  4054. "Failed to allocate ct_sns request.\n");
  4055. kfree(fcport);
  4056. return NULL;
  4057. }
  4058. INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
  4059. INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
  4060. INIT_LIST_HEAD(&fcport->gnl_entry);
  4061. INIT_LIST_HEAD(&fcport->list);
  4062. return fcport;
  4063. }
  4064. void
  4065. qla2x00_free_fcport(fc_port_t *fcport)
  4066. {
  4067. if (fcport->ct_desc.ct_sns) {
  4068. dma_free_coherent(&fcport->vha->hw->pdev->dev,
  4069. sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
  4070. fcport->ct_desc.ct_sns_dma);
  4071. fcport->ct_desc.ct_sns = NULL;
  4072. }
  4073. kfree(fcport);
  4074. }
/*
 * qla2x00_configure_loop
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/*
	 * Snapshot the DPC flags; "flags" is the working copy (and later
	 * doubles as the irq-save variable for the ATIO lock), "save_flags"
	 * restores update requests if a resync interrupts us.
	 */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do */
	/* Map topology onto the scans to run: local loop, fabric, or both. */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
/*
 * qla2x00_configure_local_loop
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 *	Fetches the firmware's ID list of logged-in loop devices, marks
 *	existing fcports for scan, merges newly found devices into the
 *	vp_fcports list, and schedules deletion/login handling for ports
 *	that disappeared or appeared.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int	rval, rval2;
	int	found_devs;
	int	found;
	fc_port_t	*fcport, *new_fcport;

	uint16_t	index;
	uint16_t	entries;
	char		*id_iter;
	uint16_t	loop_id;
	uint8_t		domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login. */
	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
		/* borrowing */
		/* init_cb is reused as scratch space for the login template. */
		u32 *bp, i, sz;

		memset(ha->init_cb, 0, ha->init_cb_size);
		sz = min_t(int, sizeof(struct els_plogi_payload),
		    ha->init_cb_size);
		rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
		    (void *)ha->init_cb, sz);
		if (rval == QLA_SUCCESS) {
			/* Template arrives little-endian; store big-endian. */
			bp = (uint32_t *)ha->init_cb;
			for (i = 0; i < sz/4 ; i++, bp++)
				*bp = cpu_to_be32(*bp);

			memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
			    sizeof(ha->plogi_els_payld.data));
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		} else {
			ql_dbg(ql_dbg_init, vha, 0x00d1,
			    "PLOGI ELS param read fail.\n");
		}
		return QLA_SUCCESS;
	}

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto cleanup_allocation;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    (uint8_t *)ha->gid_list,
	    entries * sizeof(struct gid_list_info));

	/* Mark everything SCAN; entries still present become FOUND below. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto cleanup_allocation;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	id_iter = (char *)ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = ((struct gid_list_info *)id_iter)->domain;
		area = ((struct gid_list_info *)id_iter)->area;
		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
		/* 2100/2200 report an 8-bit loop ID; newer parts 16-bit LE. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = (uint16_t)
			    ((struct gid_list_info *)id_iter)->loop_id_2100;
		else
			loop_id = le16_to_cpu(
			    ((struct gid_list_info *)id_iter)->loop_id);
		id_iter += ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain &&
		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known WWPN: refresh its addressing in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			fcport = new_fcport;

			/* Drop the lock: the allocation below may sleep. */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto cleanup_allocation;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/* Post-merge pass: delete vanished sessions, log in found ones. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

cleanup_allocation:
	kfree(new_fcport);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2098,
		    "Configure local loop error exit: rval=%x.\n", rval);
	}

	return (rval);
}
  4374. static void
  4375. qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
  4376. {
  4377. int rval;
  4378. uint16_t mb[MAILBOX_REGISTER_COUNT];
  4379. struct qla_hw_data *ha = vha->hw;
  4380. if (!IS_IIDMA_CAPABLE(ha))
  4381. return;
  4382. if (atomic_read(&fcport->state) != FCS_ONLINE)
  4383. return;
  4384. if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
  4385. fcport->fp_speed > ha->link_data_rate ||
  4386. !ha->flags.gpsc_supported)
  4387. return;
  4388. rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
  4389. mb);
  4390. if (rval != QLA_SUCCESS) {
  4391. ql_dbg(ql_dbg_disc, vha, 0x2004,
  4392. "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
  4393. fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
  4394. } else {
  4395. ql_dbg(ql_dbg_disc, vha, 0x2005,
  4396. "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
  4397. qla2x00_get_link_speed_str(ha, fcport->fp_speed),
  4398. fcport->fp_speed, fcport->port_name);
  4399. }
  4400. }
/*
 * Deferred-work helper: apply iIDMA speed settings and then refresh the
 * FCP priority configuration for @fcport.
 */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}
  4406. int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
  4407. {
  4408. struct qla_work_evt *e;
  4409. e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
  4410. if (!e)
  4411. return QLA_FUNCTION_FAILED;
  4412. e->u.fcport.fcport = fcport;
  4413. return qla2x00_post_work(vha, e);
  4414. }
/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
/*
 * Register @fcport with the FC transport as a remote port and publish
 * its role (initiator/target) to the transport layer.
 */
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Port already online: assumed already registered — skip. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	/* Register with unknown role first; the real role is set below
	 * via fc_remote_port_rolechg(). */
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* host_lock serializes the dd_data back-pointer publication. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);

	rport->supported_classes = fcport->supported_classes;

	/* Rebuild the role mask from the discovered port type. */
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s %8phN. rport %p is %s mode\n",
	    __func__, fcport->port_name, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");

	fc_remote_port_rolechg(rport, rport_ids.roles);
}
  4450. /*
  4451. * qla2x00_update_fcport
  4452. * Updates device on list.
  4453. *
  4454. * Input:
  4455. * ha = adapter block pointer.
  4456. * fcport = port structure pointer.
  4457. *
  4458. * Return:
  4459. * 0 - Success
  4460. * BIT_0 - error
  4461. *
  4462. * Context:
  4463. * Kernel context.
  4464. */
  4465. void
  4466. qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
  4467. {
  4468. if (IS_SW_RESV_ADDR(fcport->d_id))
  4469. return;
  4470. ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
  4471. __func__, fcport->port_name);
  4472. fcport->disc_state = DSC_UPD_FCPORT;
  4473. fcport->login_retry = vha->hw->login_retry_count;
  4474. fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
  4475. fcport->deleted = 0;
  4476. fcport->logout_on_delete = 1;
  4477. fcport->login_retry = vha->hw->login_retry_count;
  4478. fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
  4479. qla2x00_iidma_fcport(vha, fcport);
  4480. switch (vha->hw->current_topology) {
  4481. case ISP_CFG_N:
  4482. case ISP_CFG_NL:
  4483. fcport->keep_nport_handle = 1;
  4484. break;
  4485. default:
  4486. break;
  4487. }
  4488. if (fcport->fc4f_nvme) {
  4489. qla_nvme_register_remote(vha, fcport);
  4490. fcport->disc_state = DSC_LOGIN_COMPLETE;
  4491. qla2x00_set_fcport_state(fcport, FCS_ONLINE);
  4492. return;
  4493. }
  4494. qla24xx_update_fcport_fcp_prio(vha, fcport);
  4495. switch (vha->host->active_mode) {
  4496. case MODE_INITIATOR:
  4497. qla2x00_reg_remote_port(vha, fcport);
  4498. break;
  4499. case MODE_TARGET:
  4500. if (!vha->vha_tgt.qla_tgt->tgt_stop &&
  4501. !vha->vha_tgt.qla_tgt->tgt_stopped)
  4502. qlt_fc_port_added(vha, fcport);
  4503. break;
  4504. case MODE_DUAL:
  4505. qla2x00_reg_remote_port(vha, fcport);
  4506. if (!vha->vha_tgt.qla_tgt->tgt_stop &&
  4507. !vha->vha_tgt.qla_tgt->tgt_stopped)
  4508. qlt_fc_port_added(vha, fcport);
  4509. break;
  4510. default:
  4511. break;
  4512. }
  4513. if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
  4514. if (fcport->id_changed) {
  4515. fcport->id_changed = 0;
  4516. ql_dbg(ql_dbg_disc, vha, 0x20d7,
  4517. "%s %d %8phC post gfpnid fcp_cnt %d\n",
  4518. __func__, __LINE__, fcport->port_name,
  4519. vha->fcport_count);
  4520. qla24xx_post_gfpnid_work(vha, fcport);
  4521. } else {
  4522. ql_dbg(ql_dbg_disc, vha, 0x20d7,
  4523. "%s %d %8phC post gpsc fcp_cnt %d\n",
  4524. __func__, __LINE__, fcport->port_name,
  4525. vha->fcport_count);
  4526. qla24xx_post_gpsc_work(vha, fcport);
  4527. }
  4528. }
  4529. qla2x00_set_fcport_state(fcport, FCS_ONLINE);
  4530. fcport->disc_state = DSC_LOGIN_COMPLETE;
  4531. }
  4532. void qla_register_fcport_fn(struct work_struct *work)
  4533. {
  4534. fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
  4535. u32 rscn_gen = fcport->rscn_gen;
  4536. u16 data[2];
  4537. if (IS_SW_RESV_ADDR(fcport->d_id))
  4538. return;
  4539. qla2x00_update_fcport(fcport->vha, fcport);
  4540. if (rscn_gen != fcport->rscn_gen) {
  4541. /* RSCN(s) came in while registration */
  4542. switch (fcport->next_disc_state) {
  4543. case DSC_DELETE_PEND:
  4544. qlt_schedule_sess_for_deletion(fcport);
  4545. break;
  4546. case DSC_ADISC:
  4547. data[0] = data[1] = 0;
  4548. qla2x00_post_async_adisc_work(fcport->vha, fcport,
  4549. data);
  4550. break;
  4551. default:
  4552. break;
  4553. }
  4554. }
  4555. }
/*
 * qla2x00_configure_fabric
 *	Setup SNS devices with loop ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int	rval;
	fc_port_t	*fcport;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];
	uint16_t	loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int		discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;

	/* Probe the fabric port; failure means no switch is attached. */
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	/* Target/dual mode needs RSCN delivery enabled in the firmware. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	/* do { } while (0): "break" below abandons the remaining fabric
	 * registration/scan steps while still running the exit path. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* Register FC-4 types/features and names with the switch;
		 * abort early if a loop resync was requested meanwhile. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}
  4679. /*
  4680. * qla2x00_find_all_fabric_devs
  4681. *
  4682. * Input:
  4683. * ha = adapter block pointer.
  4684. * dev = database device entry pointer.
  4685. *
  4686. * Returns:
  4687. * 0 = success.
  4688. *
  4689. * Context:
  4690. * Kernel context.
  4691. */
  4692. static int
  4693. qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
  4694. {
  4695. int rval;
  4696. uint16_t loop_id;
  4697. fc_port_t *fcport, *new_fcport;
  4698. int found;
  4699. sw_info_t *swl;
  4700. int swl_idx;
  4701. int first_dev, last_dev;
  4702. port_id_t wrap = {}, nxt_d_id;
  4703. struct qla_hw_data *ha = vha->hw;
  4704. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  4705. unsigned long flags;
  4706. rval = QLA_SUCCESS;
  4707. /* Try GID_PT to get device list, else GAN. */
  4708. if (!ha->swl)
  4709. ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
  4710. GFP_KERNEL);
  4711. swl = ha->swl;
  4712. if (!swl) {
  4713. /*EMPTY*/
  4714. ql_dbg(ql_dbg_disc, vha, 0x209c,
  4715. "GID_PT allocations failed, fallback on GA_NXT.\n");
  4716. } else {
  4717. memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
  4718. if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
  4719. swl = NULL;
  4720. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  4721. return rval;
  4722. } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
  4723. swl = NULL;
  4724. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  4725. return rval;
  4726. } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
  4727. swl = NULL;
  4728. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  4729. return rval;
  4730. } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
  4731. swl = NULL;
  4732. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  4733. return rval;
  4734. }
  4735. /* If other queries succeeded probe for FC-4 type */
  4736. if (swl) {
  4737. qla2x00_gff_id(vha, swl);
  4738. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  4739. return rval;
  4740. }
  4741. }
  4742. swl_idx = 0;
  4743. /* Allocate temporary fcport for any new fcports discovered. */
  4744. new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  4745. if (new_fcport == NULL) {
  4746. ql_log(ql_log_warn, vha, 0x209d,
  4747. "Failed to allocate memory for fcport.\n");
  4748. return (QLA_MEMORY_ALLOC_FAILED);
  4749. }
  4750. new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
  4751. /* Set start port ID scan at adapter ID. */
  4752. first_dev = 1;
  4753. last_dev = 0;
  4754. /* Starting free loop ID. */
  4755. loop_id = ha->min_external_loopid;
  4756. for (; loop_id <= ha->max_loop_id; loop_id++) {
  4757. if (qla2x00_is_reserved_id(vha, loop_id))
  4758. continue;
  4759. if (ha->current_topology == ISP_CFG_FL &&
  4760. (atomic_read(&vha->loop_down_timer) ||
  4761. LOOP_TRANSITION(vha))) {
  4762. atomic_set(&vha->loop_down_timer, 0);
  4763. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  4764. set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
  4765. break;
  4766. }
  4767. if (swl != NULL) {
  4768. if (last_dev) {
  4769. wrap.b24 = new_fcport->d_id.b24;
  4770. } else {
  4771. new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
  4772. memcpy(new_fcport->node_name,
  4773. swl[swl_idx].node_name, WWN_SIZE);
  4774. memcpy(new_fcport->port_name,
  4775. swl[swl_idx].port_name, WWN_SIZE);
  4776. memcpy(new_fcport->fabric_port_name,
  4777. swl[swl_idx].fabric_port_name, WWN_SIZE);
  4778. new_fcport->fp_speed = swl[swl_idx].fp_speed;
  4779. new_fcport->fc4_type = swl[swl_idx].fc4_type;
  4780. new_fcport->nvme_flag = 0;
  4781. new_fcport->fc4f_nvme = 0;
  4782. if (vha->flags.nvme_enabled &&
  4783. swl[swl_idx].fc4f_nvme) {
  4784. new_fcport->fc4f_nvme =
  4785. swl[swl_idx].fc4f_nvme;
  4786. ql_log(ql_log_info, vha, 0x2131,
  4787. "FOUND: NVME port %8phC as FC Type 28h\n",
  4788. new_fcport->port_name);
  4789. }
  4790. if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
  4791. last_dev = 1;
  4792. }
  4793. swl_idx++;
  4794. }
  4795. } else {
  4796. /* Send GA_NXT to the switch */
  4797. rval = qla2x00_ga_nxt(vha, new_fcport);
  4798. if (rval != QLA_SUCCESS) {
  4799. ql_log(ql_log_warn, vha, 0x209e,
  4800. "SNS scan failed -- assuming "
  4801. "zero-entry result.\n");
  4802. rval = QLA_SUCCESS;
  4803. break;
  4804. }
  4805. }
  4806. /* If wrap on switch device list, exit. */
  4807. if (first_dev) {
  4808. wrap.b24 = new_fcport->d_id.b24;
  4809. first_dev = 0;
  4810. } else if (new_fcport->d_id.b24 == wrap.b24) {
  4811. ql_dbg(ql_dbg_disc, vha, 0x209f,
  4812. "Device wrap (%02x%02x%02x).\n",
  4813. new_fcport->d_id.b.domain,
  4814. new_fcport->d_id.b.area,
  4815. new_fcport->d_id.b.al_pa);
  4816. break;
  4817. }
  4818. /* Bypass if same physical adapter. */
  4819. if (new_fcport->d_id.b24 == base_vha->d_id.b24)
  4820. continue;
  4821. /* Bypass virtual ports of the same host. */
  4822. if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
  4823. continue;
  4824. /* Bypass if same domain and area of adapter. */
  4825. if (((new_fcport->d_id.b24 & 0xffff00) ==
  4826. (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
  4827. ISP_CFG_FL)
  4828. continue;
  4829. /* Bypass reserved domain fields. */
  4830. if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
  4831. continue;
  4832. /* Bypass ports whose FCP-4 type is not FCP_SCSI */
  4833. if (ql2xgffidenable &&
  4834. (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
  4835. new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
  4836. continue;
  4837. spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
  4838. /* Locate matching device in database. */
  4839. found = 0;
  4840. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  4841. if (memcmp(new_fcport->port_name, fcport->port_name,
  4842. WWN_SIZE))
  4843. continue;
  4844. fcport->scan_state = QLA_FCPORT_FOUND;
  4845. found++;
  4846. /* Update port state. */
  4847. memcpy(fcport->fabric_port_name,
  4848. new_fcport->fabric_port_name, WWN_SIZE);
  4849. fcport->fp_speed = new_fcport->fp_speed;
  4850. /*
  4851. * If address the same and state FCS_ONLINE
  4852. * (or in target mode), nothing changed.
  4853. */
  4854. if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
  4855. (atomic_read(&fcport->state) == FCS_ONLINE ||
  4856. (vha->host->active_mode == MODE_TARGET))) {
  4857. break;
  4858. }
  4859. /*
  4860. * If device was not a fabric device before.
  4861. */
  4862. if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
  4863. fcport->d_id.b24 = new_fcport->d_id.b24;
  4864. qla2x00_clear_loop_id(fcport);
  4865. fcport->flags |= (FCF_FABRIC_DEVICE |
  4866. FCF_LOGIN_NEEDED);
  4867. break;
  4868. }
  4869. /*
  4870. * Port ID changed or device was marked to be updated;
  4871. * Log it out if still logged in and mark it for
  4872. * relogin later.
  4873. */
  4874. if (qla_tgt_mode_enabled(base_vha)) {
  4875. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
  4876. "port changed FC ID, %8phC"
  4877. " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
  4878. fcport->port_name,
  4879. fcport->d_id.b.domain,
  4880. fcport->d_id.b.area,
  4881. fcport->d_id.b.al_pa,
  4882. fcport->loop_id,
  4883. new_fcport->d_id.b.domain,
  4884. new_fcport->d_id.b.area,
  4885. new_fcport->d_id.b.al_pa);
  4886. fcport->d_id.b24 = new_fcport->d_id.b24;
  4887. break;
  4888. }
  4889. fcport->d_id.b24 = new_fcport->d_id.b24;
  4890. fcport->flags |= FCF_LOGIN_NEEDED;
  4891. break;
  4892. }
  4893. if (fcport->fc4f_nvme) {
  4894. if (fcport->disc_state == DSC_DELETE_PEND) {
  4895. fcport->disc_state = DSC_GNL;
  4896. vha->fcport_count--;
  4897. fcport->login_succ = 0;
  4898. }
  4899. }
  4900. if (found) {
  4901. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  4902. continue;
  4903. }
  4904. /* If device was not in our fcports list, then add it. */
  4905. new_fcport->scan_state = QLA_FCPORT_FOUND;
  4906. list_add_tail(&new_fcport->list, &vha->vp_fcports);
  4907. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  4908. /* Allocate a new replacement fcport. */
  4909. nxt_d_id.b24 = new_fcport->d_id.b24;
  4910. new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  4911. if (new_fcport == NULL) {
  4912. ql_log(ql_log_warn, vha, 0xd032,
  4913. "Memory allocation failed for fcport.\n");
  4914. return (QLA_MEMORY_ALLOC_FAILED);
  4915. }
  4916. new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
  4917. new_fcport->d_id.b24 = nxt_d_id.b24;
  4918. }
  4919. qla2x00_free_fcport(new_fcport);
  4920. /*
  4921. * Logout all previous fabric dev marked lost, except FCP2 devices.
  4922. */
  4923. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  4924. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  4925. break;
  4926. if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
  4927. continue;
  4928. if (fcport->scan_state == QLA_FCPORT_SCAN) {
  4929. if ((qla_dual_mode_enabled(vha) ||
  4930. qla_ini_mode_enabled(vha)) &&
  4931. atomic_read(&fcport->state) == FCS_ONLINE) {
  4932. qla2x00_mark_device_lost(vha, fcport,
  4933. ql2xplogiabsentdevice, 0);
  4934. if (fcport->loop_id != FC_NO_LOOP_ID &&
  4935. (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
  4936. fcport->port_type != FCT_INITIATOR &&
  4937. fcport->port_type != FCT_BROADCAST) {
  4938. ql_dbg(ql_dbg_disc, vha, 0x20f0,
  4939. "%s %d %8phC post del sess\n",
  4940. __func__, __LINE__,
  4941. fcport->port_name);
  4942. qlt_schedule_sess_for_deletion(fcport);
  4943. continue;
  4944. }
  4945. }
  4946. }
  4947. if (fcport->scan_state == QLA_FCPORT_FOUND &&
  4948. (fcport->flags & FCF_LOGIN_NEEDED) != 0)
  4949. qla24xx_fcport_handle_login(vha, fcport);
  4950. }
  4951. return (rval);
  4952. }
  4953. /*
  4954. * qla2x00_find_new_loop_id
  4955. * Scan through our port list and find a new usable loop ID.
  4956. *
  4957. * Input:
  4958. * ha: adapter state pointer.
  4959. * dev: port structure pointer.
  4960. *
  4961. * Returns:
  4962. * qla2x00 local function return status code.
  4963. *
  4964. * Context:
  4965. * Kernel context.
  4966. */
  4967. int
  4968. qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
  4969. {
  4970. int rval;
  4971. struct qla_hw_data *ha = vha->hw;
  4972. unsigned long flags = 0;
  4973. rval = QLA_SUCCESS;
  4974. spin_lock_irqsave(&ha->vport_slock, flags);
  4975. dev->loop_id = find_first_zero_bit(ha->loop_id_map,
  4976. LOOPID_MAP_SIZE);
  4977. if (dev->loop_id >= LOOPID_MAP_SIZE ||
  4978. qla2x00_is_reserved_id(vha, dev->loop_id)) {
  4979. dev->loop_id = FC_NO_LOOP_ID;
  4980. rval = QLA_FUNCTION_FAILED;
  4981. } else
  4982. set_bit(dev->loop_id, ha->loop_id_map);
  4983. spin_unlock_irqrestore(&ha->vport_slock, flags);
  4984. if (rval == QLA_SUCCESS)
  4985. ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
  4986. "Assigning new loopid=%x, portid=%x.\n",
  4987. dev->loop_id, dev->d_id.b24);
  4988. else
  4989. ql_log(ql_log_warn, dev->vha, 0x2087,
  4990. "No loop_id's available, portid=%x.\n",
  4991. dev->d_id.b24);
  4992. return (rval);
  4993. }
/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
/*
 * Reserve a loop ID for the management server login.  The physical port
 * always uses the well-known NPH_MGMT_SERVER handle; vports search
 * downward from NPH_MGMT_SERVER - vp_idx for a free handle.
 * Returns the reserved ID, or FC_NO_LOOP_ID if none is free.
 */
int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
{
	int loop_id = FC_NO_LOOP_ID;
	int lid = NPH_MGMT_SERVER - vha->vp_idx;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == 0) {
		/* NOTE(review): this set_bit runs without vport_slock,
		 * unlike the vport path below — confirm intentional. */
		set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
		return NPH_MGMT_SERVER;
	}

	/* pick id from high and work down to low */
	spin_lock_irqsave(&ha->vport_slock, flags);
	for (; lid > 0; lid--) {
		if (!test_bit(lid, vha->hw->loop_id_map)) {
			set_bit(lid, vha->hw->loop_id_map);
			loop_id = lid;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return loop_id;
}
/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Retry loop: each pass issues one login attempt and reacts to
	 * the mailbox completion status (mb[0]). */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID. The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is save
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] bits describe the remote port's role. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10] reports the supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
  5149. /*
  5150. * qla2x00_local_device_login
  5151. * Issue local device login command.
  5152. *
  5153. * Input:
  5154. * ha = adapter block pointer.
  5155. * loop_id = loop id of device to login to.
  5156. *
  5157. * Returns (Where's the #define!!!!):
  5158. * 0 - Login successfully
  5159. * 1 - Login failed
  5160. * 3 - Fatal error
  5161. */
  5162. int
  5163. qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
  5164. {
  5165. int rval;
  5166. uint16_t mb[MAILBOX_REGISTER_COUNT];
  5167. memset(mb, 0, sizeof(mb));
  5168. rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
  5169. if (rval == QLA_SUCCESS) {
  5170. /* Interrogate mailbox registers for any errors */
  5171. if (mb[0] == MBS_COMMAND_ERROR)
  5172. rval = 1;
  5173. else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
  5174. /* device not in PCB table */
  5175. rval = 3;
  5176. }
  5177. return (rval);
  5178. }
/*
 * qla2x00_loop_resync
 *      Resync with fibre channel devices.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;
	struct req_que *req;
	struct rsp_que *rsp;

	req = vha->req;
	rsp = req->rsp;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, req, rsp, 0, 0,
						MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				wait_time--;

				/* Iterate again only while the resync flag was
				 * re-raised during configuration and no abort
				 * or loop-down condition intervened. */
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}
  5233. /*
  5234. * qla2x00_perform_loop_resync
  5235. * Description: This function will set the appropriate flags and call
  5236. * qla2x00_loop_resync. If successful loop will be resynced
  5237. * Arguments : scsi_qla_host_t pointer
  5238. * returm : Success or Failure
  5239. */
  5240. int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
  5241. {
  5242. int32_t rval = 0;
  5243. if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
  5244. /*Configure the flags so that resync happens properly*/
  5245. atomic_set(&ha->loop_down_timer, 0);
  5246. if (!(ha->device_flags & DFLG_NO_CABLE)) {
  5247. atomic_set(&ha->loop_state, LOOP_UP);
  5248. set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
  5249. set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
  5250. set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
  5251. rval = qla2x00_loop_resync(ha);
  5252. } else
  5253. atomic_set(&ha->loop_state, LOOP_DEAD);
  5254. clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
  5255. }
  5256. return rval;
  5257. }
/*
 * Walk every vport on the HBA and finish deferred rport removals
 * (ports with a pending drport that are not yet unconfigured).
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Pin the vport while the lock may be dropped below. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				/* Drop the spinlock around the rport removal
				 * and retake it afterwards; the fcport list
				 * is presumably kept stable by vref_count —
				 * TODO confirm. */
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
/* Assumes idc_lock always held on entry */
/*
 * qla83xx_reset_ownership - decide whether this function owns NIC-core reset.
 *
 * Reads driver-presence and device-partition-info registers, finds the
 * other FCoE function (if any), and marks this host as reset owner when
 * no other protocol drivers are present and this port has the lowest
 * FCoE function number.  Sets ha->flags.nic_core_reset_owner on success.
 */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	/* 8044 parts use direct CRB reads; others go through the
	 * qla83xx register accessors. */
	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}

	/* Each function's class type occupies one nibble; the low two
	 * bits (class_type_mask) hold the class.  PARTINFO1 covers
	 * functions 0-7. */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	/* Not found in 0-7: PARTINFO2 covers functions 8-15. */
	if (fcoe_other_function == 0xffff) {
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *    - No other protocol drivers present.
	 *    - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}
  5339. static int
  5340. __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
  5341. {
  5342. int rval = QLA_SUCCESS;
  5343. struct qla_hw_data *ha = vha->hw;
  5344. uint32_t drv_ack;
  5345. rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
  5346. if (rval == QLA_SUCCESS) {
  5347. drv_ack |= (1 << ha->portnum);
  5348. rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
  5349. }
  5350. return rval;
  5351. }
  5352. static int
  5353. __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
  5354. {
  5355. int rval = QLA_SUCCESS;
  5356. struct qla_hw_data *ha = vha->hw;
  5357. uint32_t drv_ack;
  5358. rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
  5359. if (rval == QLA_SUCCESS) {
  5360. drv_ack &= ~(1 << ha->portnum);
  5361. rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
  5362. }
  5363. return rval;
  5364. }
  5365. static const char *
  5366. qla83xx_dev_state_to_string(uint32_t dev_state)
  5367. {
  5368. switch (dev_state) {
  5369. case QLA8XXX_DEV_COLD:
  5370. return "COLD/RE-INIT";
  5371. case QLA8XXX_DEV_INITIALIZING:
  5372. return "INITIALIZING";
  5373. case QLA8XXX_DEV_READY:
  5374. return "READY";
  5375. case QLA8XXX_DEV_NEED_RESET:
  5376. return "NEED RESET";
  5377. case QLA8XXX_DEV_NEED_QUIESCENT:
  5378. return "NEED QUIESCENT";
  5379. case QLA8XXX_DEV_FAILED:
  5380. return "FAILED";
  5381. case QLA8XXX_DEV_QUIESCENT:
  5382. return "QUIESCENT";
  5383. default:
  5384. return "Unknown";
  5385. }
  5386. }
/* Assumes idc-lock always held on entry */
/*
 * qla83xx_idc_audit - write an IDC audit record for this function.
 *
 * Audit register layout (from the shifts below): low bits carry the port
 * number, the audit type is placed at bit 7, and the timestamp/duration
 * occupies bits 8 and up.
 *
 * @audit_type: IDC_AUDIT_TIMESTAMP records the current time (seconds);
 *              IDC_AUDIT_COMPLETION records elapsed time since then.
 */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		/* Stash current time in seconds for the later
		 * COMPLETION audit. */
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		/* NOTE(review): idc_audit_ts was stored in *seconds* above,
		 * yet it is passed to jiffies_to_msecs() here as if it were
		 * a jiffies value, so the computed duration looks suspect.
		 * Preserved as-is; confirm intent before changing. */
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}
/* Assumes idc_lock always held on entry */
/*
 * qla83xx_initiating_reset - move the IDC state machine toward NEED_RESET.
 *
 * If this host is the reset owner and the device is READY, it writes
 * NEED_RESET itself; otherwise it spins (cycling the idc lock) until some
 * other function has moved the device out of READY.  In both cases it
 * finishes by acking via the drv-ack register.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED when reset is disabled
 * through idc-control.
 */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		const char *state = qla83xx_dev_state_to_string(dev_state);
		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		while (dev_state == QLA8XXX_DEV_READY) {
			/* Release the idc lock while sleeping so the
			 * reset owner can update the state. */
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}
  5449. int
  5450. __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
  5451. {
  5452. return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
  5453. }
  5454. int
  5455. __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
  5456. {
  5457. return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
  5458. }
  5459. static int
  5460. qla83xx_check_driver_presence(scsi_qla_host_t *vha)
  5461. {
  5462. uint32_t drv_presence = 0;
  5463. struct qla_hw_data *ha = vha->hw;
  5464. qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
  5465. if (drv_presence & (1 << ha->portnum))
  5466. return QLA_SUCCESS;
  5467. else
  5468. return QLA_TEST_FAILED;
  5469. }
/*
 * qla83xx_nic_core_reset - initiate and drive a NIC-core reset via IDC.
 *
 * Establishes reset ownership, kicks the IDC state machine toward
 * NEED_RESET, then runs the state handler: the owner performs the reset,
 * other functions wait for READY/FAILED.  The whole sequence runs under
 * the idc lock.
 *
 * Returns QLA_SUCCESS on success, QLA_FUNCTION_FAILED when the device is
 * in a failed state, when this function was removed from IDC
 * participation, or when initiating the reset failed.
 */
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered  %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		/* Clear our ack bit regardless of the handler outcome. */
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}
/*
 * qla2xxx_mctp_dump - capture an MCTP dump into a (cached) DMA buffer.
 *
 * Allocates ha->mctp_dump on first use (kept for reuse), pulls
 * MCTP_DUMP_SIZE bytes of dump data from the firmware, and — on port 0,
 * when no NIC-core reset handler is already active — restarts the NIC
 * firmware afterwards.
 *
 * Returns QLA_SUCCESS or an error status.  Note that when the
 * firmware-restart branch runs, rval reflects the restart result, not
 * the dump capture.
 */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	/* Allocate the dump buffer once; it is cached on ha for reuse. */
	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR	0x00000000
	/* Transfer length is in dwords (size / 4). */
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Only port 0 restarts the NIC firmware, and only when no other
	 * reset handler is already running. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;
}
/*
 * qla2x00_quiesce_io
 * Description: This function will block the new I/Os
 *              Its not aborting any I/Os as context
 *              is not destroyed during quiescence
 * Arguments: scsi_qla_host_t
 * return : void
 */
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		/* Take the loop down and mark every device lost so no
		 * new I/O is issued. */
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
		/* NOTE(review): vp_list is walked here without taking
		 * vport_slock, unlike qla2x00_abort_isp_cleanup() —
		 * confirm this is safe in the quiesce path. */
		list_for_each_entry(vp, &ha->vp_list, list)
			qla2x00_mark_all_devices_lost(vp, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
					LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
}
/*
 * qla2x00_abort_isp_cleanup - tear down HBA state ahead of an ISP reset.
 *
 * Takes the port offline (except P3P parts, which wait for command
 * completion with online set), resets the chip, clears topology/firmware
 * state, drains pending mailbox commands, marks all devices lost across
 * every vport, clears async login state, and finally aborts/requeues all
 * outstanding commands (unless EEH is busy).
 */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;
	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	ha->flags.purge_mbox = 1;
	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	ha->flags.fw_started = 0;
	ha->flags.fw_init_done = 0;
	/* Bump chip_reset and propagate it to every queue pair so stale
	 * commands can be recognized after the reset. */
	ha->chip_reset++;
	ha->base_qpair->chip_reset = ha->chip_reset;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
			    ha->base_qpair->chip_reset;
	}

	/* purge MBox commands */
	if (atomic_read(&ha->num_pend_mbx_stage3)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	/* Poll (bounded to ~1 s: 50 * 20 ms) until all pending mailbox
	 * stages drain. */
	i = 0;
	while (atomic_read(&ha->num_pend_mbx_stage3) ||
	    atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;
		if (i > 50)
			break;
	}
	ha->flags.purge_mbox = 0;

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			/* Pin vport, drop lock across the (possibly
			 * blocking) mark-lost call, then reacquire. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			qla2x00_mark_all_devices_lost(vp, 0);
			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		fcport->scan_state = 0;
	}
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
	/* memory barrier */
	wmb();
}
/*
 * qla2x00_abort_isp
 *      Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* Give up early if the PCI channel is permanently gone —
		 * nothing below can succeed. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			/* Restart succeeded: bring the port back online. */
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			/* Re-arm FCE tracing if it was allocated. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Re-arm EFT tracing if it was allocated. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				/* Retry budget exhausted? */
				if (ha->isp_abort_cnt == 0) {
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					qla2x00_abort_isp_cleanup(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: start the retry budget. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		/* Propagate the abort to every virtual port (vp_idx != 0),
		 * pinning each vport while the lock is dropped. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_vp_abort_isp(vp);
				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}
  5822. /*
  5823. * qla2x00_restart_isp
  5824. * restarts the ISP after a reset
  5825. *
  5826. * Input:
  5827. * ha = adapter block pointer.
  5828. *
  5829. * Returns:
  5830. * 0 = success
  5831. */
  5832. static int
  5833. qla2x00_restart_isp(scsi_qla_host_t *vha)
  5834. {
  5835. int status = 0;
  5836. struct qla_hw_data *ha = vha->hw;
  5837. struct req_que *req = ha->req_q_map[0];
  5838. struct rsp_que *rsp = ha->rsp_q_map[0];
  5839. /* If firmware needs to be loaded */
  5840. if (qla2x00_isp_firmware(vha)) {
  5841. vha->flags.online = 0;
  5842. status = ha->isp_ops->chip_diag(vha);
  5843. if (!status)
  5844. status = qla2x00_setup_chip(vha);
  5845. }
  5846. if (!status && !(status = qla2x00_init_rings(vha))) {
  5847. clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
  5848. ha->flags.chip_reset_done = 1;
  5849. /* Initialize the queues in use */
  5850. qla25xx_init_queues(ha);
  5851. status = qla2x00_fw_ready(vha);
  5852. if (!status) {
  5853. /* Issue a marker after FW becomes ready. */
  5854. qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
  5855. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  5856. }
  5857. /* if no cable then assume it's good */
  5858. if ((vha->device_flags & DFLG_NO_CABLE))
  5859. status = 0;
  5860. }
  5861. return (status);
  5862. }
  5863. static int
  5864. qla25xx_init_queues(struct qla_hw_data *ha)
  5865. {
  5866. struct rsp_que *rsp = NULL;
  5867. struct req_que *req = NULL;
  5868. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  5869. int ret = -1;
  5870. int i;
  5871. for (i = 1; i < ha->max_rsp_queues; i++) {
  5872. rsp = ha->rsp_q_map[i];
  5873. if (rsp && test_bit(i, ha->rsp_qid_map)) {
  5874. rsp->options &= ~BIT_0;
  5875. ret = qla25xx_init_rsp_que(base_vha, rsp);
  5876. if (ret != QLA_SUCCESS)
  5877. ql_dbg(ql_dbg_init, base_vha, 0x00ff,
  5878. "%s Rsp que: %d init failed.\n",
  5879. __func__, rsp->id);
  5880. else
  5881. ql_dbg(ql_dbg_init, base_vha, 0x0100,
  5882. "%s Rsp que: %d inited.\n",
  5883. __func__, rsp->id);
  5884. }
  5885. }
  5886. for (i = 1; i < ha->max_req_queues; i++) {
  5887. req = ha->req_q_map[i];
  5888. if (req && test_bit(i, ha->req_qid_map)) {
  5889. /* Clear outstanding commands array. */
  5890. req->options &= ~BIT_0;
  5891. ret = qla25xx_init_req_que(base_vha, req);
  5892. if (ret != QLA_SUCCESS)
  5893. ql_dbg(ql_dbg_init, base_vha, 0x0101,
  5894. "%s Req que: %d init failed.\n",
  5895. __func__, req->id);
  5896. else
  5897. ql_dbg(ql_dbg_init, base_vha, 0x0102,
  5898. "%s Req que: %d inited.\n",
  5899. __func__, req->id);
  5900. }
  5901. }
  5902. return ret;
  5903. }
/*
 * qla2x00_reset_adapter
 *      Reset adapter.
 *
 * Input:
 *      ha = adapter block pointer.
 */
void
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Take the port offline and quiesce interrupts before touching
	 * the HCCR. */
	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reset the RISC, then release it; each write is followed by a
	 * read-back to flush the posted PCI write. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * qla24xx_reset_adapter - ISP24xx-family variant of adapter reset.
 *
 * No-op on P3P parts.  Puts the RISC into reset then releases it in a
 * paused state, with read-backs to flush posted PCI writes; re-enables
 * interrupts afterwards on non-polling chip types.
 */
void
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_P3P_TYPE(ha))
		return;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);	/* flush posted write */
	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);	/* flush posted write */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);
}
  5945. /* On sparc systems, obtain port and node WWN from firmware
  5946. * properties.
  5947. */
  5948. static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
  5949. struct nvram_24xx *nv)
  5950. {
  5951. #ifdef CONFIG_SPARC
  5952. struct qla_hw_data *ha = vha->hw;
  5953. struct pci_dev *pdev = ha->pdev;
  5954. struct device_node *dp = pci_device_to_OF_node(pdev);
  5955. const u8 *val;
  5956. int len;
  5957. val = of_get_property(dp, "port-wwn", &len);
  5958. if (val && len >= WWN_SIZE)
  5959. memcpy(nv->port_name, val, WWN_SIZE);
  5960. val = of_get_property(dp, "node-wwn", &len);
  5961. if (val && len >= WWN_SIZE)
  5962. memcpy(nv->node_name, val, WWN_SIZE);
  5963. #endif
  5964. }
  5965. int
  5966. qla24xx_nvram_config(scsi_qla_host_t *vha)
  5967. {
  5968. int rval;
  5969. struct init_cb_24xx *icb;
  5970. struct nvram_24xx *nv;
  5971. uint32_t *dptr;
  5972. uint8_t *dptr1, *dptr2;
  5973. uint32_t chksum;
  5974. uint16_t cnt;
  5975. struct qla_hw_data *ha = vha->hw;
  5976. rval = QLA_SUCCESS;
  5977. icb = (struct init_cb_24xx *)ha->init_cb;
  5978. nv = ha->nvram;
  5979. /* Determine NVRAM starting address. */
  5980. if (ha->port_no == 0) {
  5981. ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
  5982. ha->vpd_base = FA_NVRAM_VPD0_ADDR;
  5983. } else {
  5984. ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
  5985. ha->vpd_base = FA_NVRAM_VPD1_ADDR;
  5986. }
  5987. ha->nvram_size = sizeof(struct nvram_24xx);
  5988. ha->vpd_size = FA_NVRAM_VPD_SIZE;
  5989. /* Get VPD data into cache */
  5990. ha->vpd = ha->nvram + VPD_OFFSET;
  5991. ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
  5992. ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
  5993. /* Get NVRAM data into cache and calculate checksum. */
  5994. dptr = (uint32_t *)nv;
  5995. ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
  5996. ha->nvram_size);
  5997. for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
  5998. chksum += le32_to_cpu(*dptr);
  5999. ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
  6000. "Contents of NVRAM\n");
  6001. ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
  6002. (uint8_t *)nv, ha->nvram_size);
  6003. /* Bad NVRAM data, set defaults parameters. */
  6004. if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
  6005. || nv->id[3] != ' ' ||
  6006. nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
  6007. /* Reset NVRAM data. */
  6008. ql_log(ql_log_warn, vha, 0x006b,
  6009. "Inconsistent NVRAM detected: checksum=0x%x id=%c "
  6010. "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
  6011. ql_log(ql_log_warn, vha, 0x006c,
  6012. "Falling back to functioning (yet invalid -- WWPN) "
  6013. "defaults.\n");
  6014. /*
  6015. * Set default initialization control block.
  6016. */
  6017. memset(nv, 0, ha->nvram_size);
  6018. nv->nvram_version = cpu_to_le16(ICB_VERSION);
  6019. nv->version = cpu_to_le16(ICB_VERSION);
  6020. nv->frame_payload_size = 2048;
  6021. nv->execution_throttle = cpu_to_le16(0xFFFF);
  6022. nv->exchange_count = cpu_to_le16(0);
  6023. nv->hard_address = cpu_to_le16(124);
  6024. nv->port_name[0] = 0x21;
  6025. nv->port_name[1] = 0x00 + ha->port_no + 1;
  6026. nv->port_name[2] = 0x00;
  6027. nv->port_name[3] = 0xe0;
  6028. nv->port_name[4] = 0x8b;
  6029. nv->port_name[5] = 0x1c;
  6030. nv->port_name[6] = 0x55;
  6031. nv->port_name[7] = 0x86;
  6032. nv->node_name[0] = 0x20;
  6033. nv->node_name[1] = 0x00;
  6034. nv->node_name[2] = 0x00;
  6035. nv->node_name[3] = 0xe0;
  6036. nv->node_name[4] = 0x8b;
  6037. nv->node_name[5] = 0x1c;
  6038. nv->node_name[6] = 0x55;
  6039. nv->node_name[7] = 0x86;
  6040. qla24xx_nvram_wwn_from_ofw(vha, nv);
  6041. nv->login_retry_count = cpu_to_le16(8);
  6042. nv->interrupt_delay_timer = cpu_to_le16(0);
  6043. nv->login_timeout = cpu_to_le16(0);
  6044. nv->firmware_options_1 =
  6045. cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
  6046. nv->firmware_options_2 = cpu_to_le32(2 << 4);
  6047. nv->firmware_options_2 |= cpu_to_le32(BIT_12);
  6048. nv->firmware_options_3 = cpu_to_le32(2 << 13);
  6049. nv->host_p = cpu_to_le32(BIT_11|BIT_10);
  6050. nv->efi_parameters = cpu_to_le32(0);
  6051. nv->reset_delay = 5;
  6052. nv->max_luns_per_target = cpu_to_le16(128);
  6053. nv->port_down_retry_count = cpu_to_le16(30);
  6054. nv->link_down_timeout = cpu_to_le16(30);
  6055. rval = 1;
  6056. }
  6057. if (qla_tgt_mode_enabled(vha)) {
  6058. /* Don't enable full login after initial LIP */
  6059. nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
  6060. /* Don't enable LIP full login for initiator */
  6061. nv->host_p &= cpu_to_le32(~BIT_10);
  6062. }
  6063. qlt_24xx_config_nvram_stage1(vha, nv);
  6064. /* Reset Initialization control block */
  6065. memset(icb, 0, ha->init_cb_size);
  6066. /* Copy 1st segment. */
  6067. dptr1 = (uint8_t *)icb;
  6068. dptr2 = (uint8_t *)&nv->version;
  6069. cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
  6070. while (cnt--)
  6071. *dptr1++ = *dptr2++;
  6072. icb->login_retry_count = nv->login_retry_count;
  6073. icb->link_down_on_nos = nv->link_down_on_nos;
  6074. /* Copy 2nd segment. */
  6075. dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
  6076. dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
  6077. cnt = (uint8_t *)&icb->reserved_3 -
  6078. (uint8_t *)&icb->interrupt_delay_timer;
  6079. while (cnt--)
  6080. *dptr1++ = *dptr2++;
  6081. ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
  6082. /*
  6083. * Setup driver NVRAM options.
  6084. */
  6085. qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
  6086. "QLA2462");
  6087. qlt_24xx_config_nvram_stage2(vha, icb);
  6088. if (nv->host_p & cpu_to_le32(BIT_15)) {
  6089. /* Use alternate WWN? */
  6090. memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
  6091. memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
  6092. }
  6093. /* Prepare nodename */
  6094. if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
  6095. /*
  6096. * Firmware will apply the following mask if the nodename was
  6097. * not provided.
  6098. */
  6099. memcpy(icb->node_name, icb->port_name, WWN_SIZE);
  6100. icb->node_name[0] &= 0xF0;
  6101. }
  6102. /* Set host adapter parameters. */
  6103. ha->flags.disable_risc_code_load = 0;
  6104. ha->flags.enable_lip_reset = 0;
  6105. ha->flags.enable_lip_full_login =
  6106. le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
  6107. ha->flags.enable_target_reset =
  6108. le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
  6109. ha->flags.enable_led_scheme = 0;
  6110. ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
  6111. ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
  6112. (BIT_6 | BIT_5 | BIT_4)) >> 4;
  6113. memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
  6114. sizeof(ha->fw_seriallink_options24));
  6115. /* save HBA serial number */
  6116. ha->serial0 = icb->port_name[5];
  6117. ha->serial1 = icb->port_name[6];
  6118. ha->serial2 = icb->port_name[7];
  6119. memcpy(vha->node_name, icb->node_name, WWN_SIZE);
  6120. memcpy(vha->port_name, icb->port_name, WWN_SIZE);
  6121. icb->execution_throttle = cpu_to_le16(0xFFFF);
  6122. ha->retry_count = le16_to_cpu(nv->login_retry_count);
  6123. /* Set minimum login_timeout to 4 seconds. */
  6124. if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
  6125. nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
  6126. if (le16_to_cpu(nv->login_timeout) < 4)
  6127. nv->login_timeout = cpu_to_le16(4);
  6128. ha->login_timeout = le16_to_cpu(nv->login_timeout);
  6129. /* Set minimum RATOV to 100 tenths of a second. */
  6130. ha->r_a_tov = 100;
  6131. ha->loop_reset_delay = nv->reset_delay;
  6132. /* Link Down Timeout = 0:
  6133. *
  6134. * When Port Down timer expires we will start returning
  6135. * I/O's to OS with "DID_NO_CONNECT".
  6136. *
  6137. * Link Down Timeout != 0:
  6138. *
  6139. * The driver waits for the link to come up after link down
  6140. * before returning I/Os to OS with "DID_NO_CONNECT".
  6141. */
  6142. if (le16_to_cpu(nv->link_down_timeout) == 0) {
  6143. ha->loop_down_abort_time =
  6144. (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
  6145. } else {
  6146. ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
  6147. ha->loop_down_abort_time =
  6148. (LOOP_DOWN_TIME - ha->link_down_timeout);
  6149. }
  6150. /* Need enough time to try and get the port back. */
  6151. ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
  6152. if (qlport_down_retry)
  6153. ha->port_down_retry_count = qlport_down_retry;
  6154. /* Set login_retry_count */
  6155. ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
  6156. if (ha->port_down_retry_count ==
  6157. le16_to_cpu(nv->port_down_retry_count) &&
  6158. ha->port_down_retry_count > 3)
  6159. ha->login_retry_count = ha->port_down_retry_count;
  6160. else if (ha->port_down_retry_count > (int)ha->login_retry_count)
  6161. ha->login_retry_count = ha->port_down_retry_count;
  6162. if (ql2xloginretrycount)
  6163. ha->login_retry_count = ql2xloginretrycount;
  6164. /* N2N: driver will initiate Login instead of FW */
  6165. icb->firmware_options_3 |= BIT_8;
  6166. /* Enable ZIO. */
  6167. if (!vha->flags.init_done) {
  6168. ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
  6169. (BIT_3 | BIT_2 | BIT_1 | BIT_0);
  6170. ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
  6171. le16_to_cpu(icb->interrupt_delay_timer): 2;
  6172. }
  6173. icb->firmware_options_2 &= cpu_to_le32(
  6174. ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
  6175. if (ha->zio_mode != QLA_ZIO_DISABLED) {
  6176. ha->zio_mode = QLA_ZIO_MODE_6;
  6177. ql_log(ql_log_info, vha, 0x006f,
  6178. "ZIO mode %d enabled; timer delay (%d us).\n",
  6179. ha->zio_mode, ha->zio_timer * 100);
  6180. icb->firmware_options_2 |= cpu_to_le32(
  6181. (uint32_t)ha->zio_mode);
  6182. icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
  6183. }
  6184. if (rval) {
  6185. ql_log(ql_log_warn, vha, 0x0070,
  6186. "NVRAM configuration failed.\n");
  6187. }
  6188. return (rval);
  6189. }
/**
 * qla27xx_find_valid_image() - Select which flash firmware image to use.
 * @vha: SCSI host whose flash is probed.
 *
 * Reads the primary and secondary image-status records from flash and
 * validates each one by signature and by a zero-sum checksum over the
 * whole record.  The selection is cached in ha->active_image:
 * 0 = default bootloader/firmware, QLA27XX_PRIMARY_IMAGE, or
 * QLA27XX_SECONDARY_IMAGE.  A valid, enabled secondary image wins when
 * either no primary was selected or the primary's generation number is
 * older than the secondary's.
 *
 * Return: the selected image identifier (ha->active_image).
 */
uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
{
	struct qla27xx_image_status pri_image_status, sec_image_status;
	uint8_t valid_pri_image, valid_sec_image;
	uint32_t *wptr;
	uint32_t cnt, chksum, size;
	struct qla_hw_data *ha = vha->hw;

	/* Assume both images are usable until a check below fails. */
	valid_pri_image = valid_sec_image = 1;
	ha->active_image = 0;
	/* Record size in 32-bit words, for flash reads and checksumming. */
	size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);

	if (!ha->flt_region_img_status_pri) {
		/* Flash layout has no primary image-status region. */
		valid_pri_image = 0;
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
	    ha->flt_region_img_status_pri, size);

	if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary image signature (0x%x) not valid\n",
		    pri_image_status.signature);
		valid_pri_image = 0;
		goto check_sec_image;
	}

	/* An intact record's little-endian words sum to zero. */
	wptr = (uint32_t *)(&pri_image_status);
	cnt = size;
	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);

	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Checksum validation failed for primary image (0x%x)\n",
		    chksum);
		valid_pri_image = 0;
	}

check_sec_image:
	if (!ha->flt_region_img_status_sec) {
		/* Flash layout has no secondary image-status region. */
		valid_sec_image = 0;
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
	    ha->flt_region_img_status_sec, size);

	if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary image signature(0x%x) not valid\n",
		    sec_image_status.signature);
		valid_sec_image = 0;
		goto check_valid_image;
	}

	/* Same zero-sum checksum as the primary record above. */
	wptr = (uint32_t *)(&sec_image_status);
	cnt = size;
	for (chksum = 0; cnt--; wptr++)
		chksum += le32_to_cpu(*wptr);

	if (chksum) {
		ql_dbg(ql_dbg_init, vha, 0x018e,
		    "Checksum validation failed for secondary image (0x%x)\n",
		    chksum);
		valid_sec_image = 0;
	}

check_valid_image:
	/* Bit 0 of image_status_mask marks the image as enabled. */
	if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
		ha->active_image = QLA27XX_PRIMARY_IMAGE;

	/*
	 * Prefer the secondary image when no primary was selected or
	 * when the secondary has a newer generation number.  (The
	 * short-circuit on !ha->active_image ensures pri_image_status
	 * is only read here when the primary record was valid.)
	 */
	if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
		if (!ha->active_image ||
		    pri_image_status.generation_number <
		    sec_image_status.generation_number)
			ha->active_image = QLA27XX_SECONDARY_IMAGE;
	}

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
	    ha->active_image == 0 ? "default bootld and fw" :
	    ha->active_image == 1 ? "primary" :
	    ha->active_image == 2 ? "secondary" :
	    "Invalid");

	return ha->active_image;
}
  6263. static int
  6264. qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
  6265. uint32_t faddr)
  6266. {
  6267. int rval = QLA_SUCCESS;
  6268. int segments, fragment;
  6269. uint32_t *dcode, dlen;
  6270. uint32_t risc_addr;
  6271. uint32_t risc_size;
  6272. uint32_t i;
  6273. struct qla_hw_data *ha = vha->hw;
  6274. struct req_que *req = ha->req_q_map[0];
  6275. ql_dbg(ql_dbg_init, vha, 0x008b,
  6276. "FW: Loading firmware from flash (%x).\n", faddr);
  6277. rval = QLA_SUCCESS;
  6278. segments = FA_RISC_CODE_SEGMENTS;
  6279. dcode = (uint32_t *)req->ring;
  6280. *srisc_addr = 0;
  6281. if (IS_QLA27XX(ha) &&
  6282. qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
  6283. faddr = ha->flt_region_fw_sec;
  6284. /* Validate firmware image by checking version. */
  6285. qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
  6286. for (i = 0; i < 4; i++)
  6287. dcode[i] = be32_to_cpu(dcode[i]);
  6288. if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
  6289. dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
  6290. (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
  6291. dcode[3] == 0)) {
  6292. ql_log(ql_log_fatal, vha, 0x008c,
  6293. "Unable to verify the integrity of flash firmware "
  6294. "image.\n");
  6295. ql_log(ql_log_fatal, vha, 0x008d,
  6296. "Firmware data: %08x %08x %08x %08x.\n",
  6297. dcode[0], dcode[1], dcode[2], dcode[3]);
  6298. return QLA_FUNCTION_FAILED;
  6299. }
  6300. while (segments && rval == QLA_SUCCESS) {
  6301. /* Read segment's load information. */
  6302. qla24xx_read_flash_data(vha, dcode, faddr, 4);
  6303. risc_addr = be32_to_cpu(dcode[2]);
  6304. *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
  6305. risc_size = be32_to_cpu(dcode[3]);
  6306. fragment = 0;
  6307. while (risc_size > 0 && rval == QLA_SUCCESS) {
  6308. dlen = (uint32_t)(ha->fw_transfer_size >> 2);
  6309. if (dlen > risc_size)
  6310. dlen = risc_size;
  6311. ql_dbg(ql_dbg_init, vha, 0x008e,
  6312. "Loading risc segment@ risc addr %x "
  6313. "number of dwords 0x%x offset 0x%x.\n",
  6314. risc_addr, dlen, faddr);
  6315. qla24xx_read_flash_data(vha, dcode, faddr, dlen);
  6316. for (i = 0; i < dlen; i++)
  6317. dcode[i] = swab32(dcode[i]);
  6318. rval = qla2x00_load_ram(vha, req->dma, risc_addr,
  6319. dlen);
  6320. if (rval) {
  6321. ql_log(ql_log_fatal, vha, 0x008f,
  6322. "Failed to load segment %d of firmware.\n",
  6323. fragment);
  6324. return QLA_FUNCTION_FAILED;
  6325. }
  6326. faddr += dlen;
  6327. risc_addr += dlen;
  6328. risc_size -= dlen;
  6329. fragment++;
  6330. }
  6331. /* Next segment. */
  6332. segments--;
  6333. }
  6334. if (!IS_QLA27XX(ha))
  6335. return rval;
  6336. if (ha->fw_dump_template)
  6337. vfree(ha->fw_dump_template);
  6338. ha->fw_dump_template = NULL;
  6339. ha->fw_dump_template_len = 0;
  6340. ql_dbg(ql_dbg_init, vha, 0x0161,
  6341. "Loading fwdump template from %x\n", faddr);
  6342. qla24xx_read_flash_data(vha, dcode, faddr, 7);
  6343. risc_size = be32_to_cpu(dcode[2]);
  6344. ql_dbg(ql_dbg_init, vha, 0x0162,
  6345. "-> array size %x dwords\n", risc_size);
  6346. if (risc_size == 0 || risc_size == ~0)
  6347. goto default_template;
  6348. dlen = (risc_size - 8) * sizeof(*dcode);
  6349. ql_dbg(ql_dbg_init, vha, 0x0163,
  6350. "-> template allocating %x bytes...\n", dlen);
  6351. ha->fw_dump_template = vmalloc(dlen);
  6352. if (!ha->fw_dump_template) {
  6353. ql_log(ql_log_warn, vha, 0x0164,
  6354. "Failed fwdump template allocate %x bytes.\n", risc_size);
  6355. goto default_template;
  6356. }
  6357. faddr += 7;
  6358. risc_size -= 8;
  6359. dcode = ha->fw_dump_template;
  6360. qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
  6361. for (i = 0; i < risc_size; i++)
  6362. dcode[i] = le32_to_cpu(dcode[i]);
  6363. if (!qla27xx_fwdt_template_valid(dcode)) {
  6364. ql_log(ql_log_warn, vha, 0x0165,
  6365. "Failed fwdump template validate\n");
  6366. goto default_template;
  6367. }
  6368. dlen = qla27xx_fwdt_template_size(dcode);
  6369. ql_dbg(ql_dbg_init, vha, 0x0166,
  6370. "-> template size %x bytes\n", dlen);
  6371. if (dlen > risc_size * sizeof(*dcode)) {
  6372. ql_log(ql_log_warn, vha, 0x0167,
  6373. "Failed fwdump template exceeds array by %zx bytes\n",
  6374. (size_t)(dlen - risc_size * sizeof(*dcode)));
  6375. goto default_template;
  6376. }
  6377. ha->fw_dump_template_len = dlen;
  6378. return rval;
  6379. default_template:
  6380. ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
  6381. if (ha->fw_dump_template)
  6382. vfree(ha->fw_dump_template);
  6383. ha->fw_dump_template = NULL;
  6384. ha->fw_dump_template_len = 0;
  6385. dlen = qla27xx_fwdt_template_default_size();
  6386. ql_dbg(ql_dbg_init, vha, 0x0169,
  6387. "-> template allocating %x bytes...\n", dlen);
  6388. ha->fw_dump_template = vmalloc(dlen);
  6389. if (!ha->fw_dump_template) {
  6390. ql_log(ql_log_warn, vha, 0x016a,
  6391. "Failed fwdump template allocate %x bytes.\n", risc_size);
  6392. goto failed_template;
  6393. }
  6394. dcode = ha->fw_dump_template;
  6395. risc_size = dlen / sizeof(*dcode);
  6396. memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
  6397. for (i = 0; i < risc_size; i++)
  6398. dcode[i] = be32_to_cpu(dcode[i]);
  6399. if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
  6400. ql_log(ql_log_warn, vha, 0x016b,
  6401. "Failed fwdump template validate\n");
  6402. goto failed_template;
  6403. }
  6404. dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
  6405. ql_dbg(ql_dbg_init, vha, 0x016c,
  6406. "-> template size %x bytes\n", dlen);
  6407. ha->fw_dump_template_len = dlen;
  6408. return rval;
  6409. failed_template:
  6410. ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
  6411. if (ha->fw_dump_template)
  6412. vfree(ha->fw_dump_template);
  6413. ha->fw_dump_template = NULL;
  6414. ha->fw_dump_template_len = 0;
  6415. return rval;
  6416. }
  6417. #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
/**
 * qla2x00_load_risc() - Load legacy ISP firmware via request-firmware.
 * @vha: SCSI host to load firmware for.
 * @srisc_addr: out: RISC start address of the first firmware segment.
 *
 * Fetches the 16-bit-word firmware blob through qla2x00_request_firmware(),
 * validates its header, then DMA-loads each segment (described by the
 * blob's seg[] table) into RISC memory, staging words in the request ring.
 *
 * Return: QLA_SUCCESS, QLA_FUNCTION_FAILED on an unusable image, or the
 * qla2x00_load_ram() status of a failed segment transfer.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode, *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	/* Stage words in the (idle, DMA-mapped) request ring. */
	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint16_t *)blob->fw->data;
	/* Running total of bytes claimed by segment headers. */
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zeroes version words: blank/corrupt image. */
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	/* The blob's seg table is zero-terminated. */
	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		/* First segment's load address is the RISC start address. */
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Cap each transfer at the HBA's DMA limit. */
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			for (i = 0; i < wlen; i++)
				wcode[i] = swab16(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
  6504. static int
  6505. qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
  6506. {
  6507. int rval;
  6508. int segments, fragment;
  6509. uint32_t *dcode, dlen;
  6510. uint32_t risc_addr;
  6511. uint32_t risc_size;
  6512. uint32_t i;
  6513. struct fw_blob *blob;
  6514. const uint32_t *fwcode;
  6515. uint32_t fwclen;
  6516. struct qla_hw_data *ha = vha->hw;
  6517. struct req_que *req = ha->req_q_map[0];
  6518. /* Load firmware blob. */
  6519. blob = qla2x00_request_firmware(vha);
  6520. if (!blob) {
  6521. ql_log(ql_log_warn, vha, 0x0090,
  6522. "Firmware image unavailable.\n");
  6523. ql_log(ql_log_warn, vha, 0x0091,
  6524. "Firmware images can be retrieved from: "
  6525. QLA_FW_URL ".\n");
  6526. return QLA_FUNCTION_FAILED;
  6527. }
  6528. ql_dbg(ql_dbg_init, vha, 0x0092,
  6529. "FW: Loading via request-firmware.\n");
  6530. rval = QLA_SUCCESS;
  6531. segments = FA_RISC_CODE_SEGMENTS;
  6532. dcode = (uint32_t *)req->ring;
  6533. *srisc_addr = 0;
  6534. fwcode = (uint32_t *)blob->fw->data;
  6535. fwclen = 0;
  6536. /* Validate firmware image by checking version. */
  6537. if (blob->fw->size < 8 * sizeof(uint32_t)) {
  6538. ql_log(ql_log_fatal, vha, 0x0093,
  6539. "Unable to verify integrity of firmware image (%zd).\n",
  6540. blob->fw->size);
  6541. return QLA_FUNCTION_FAILED;
  6542. }
  6543. for (i = 0; i < 4; i++)
  6544. dcode[i] = be32_to_cpu(fwcode[i + 4]);
  6545. if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
  6546. dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
  6547. (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
  6548. dcode[3] == 0)) {
  6549. ql_log(ql_log_fatal, vha, 0x0094,
  6550. "Unable to verify integrity of firmware image (%zd).\n",
  6551. blob->fw->size);
  6552. ql_log(ql_log_fatal, vha, 0x0095,
  6553. "Firmware data: %08x %08x %08x %08x.\n",
  6554. dcode[0], dcode[1], dcode[2], dcode[3]);
  6555. return QLA_FUNCTION_FAILED;
  6556. }
  6557. while (segments && rval == QLA_SUCCESS) {
  6558. risc_addr = be32_to_cpu(fwcode[2]);
  6559. *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
  6560. risc_size = be32_to_cpu(fwcode[3]);
  6561. /* Validate firmware image size. */
  6562. fwclen += risc_size * sizeof(uint32_t);
  6563. if (blob->fw->size < fwclen) {
  6564. ql_log(ql_log_fatal, vha, 0x0096,
  6565. "Unable to verify integrity of firmware image "
  6566. "(%zd).\n", blob->fw->size);
  6567. return QLA_FUNCTION_FAILED;
  6568. }
  6569. fragment = 0;
  6570. while (risc_size > 0 && rval == QLA_SUCCESS) {
  6571. dlen = (uint32_t)(ha->fw_transfer_size >> 2);
  6572. if (dlen > risc_size)
  6573. dlen = risc_size;
  6574. ql_dbg(ql_dbg_init, vha, 0x0097,
  6575. "Loading risc segment@ risc addr %x "
  6576. "number of dwords 0x%x.\n", risc_addr, dlen);
  6577. for (i = 0; i < dlen; i++)
  6578. dcode[i] = swab32(fwcode[i]);
  6579. rval = qla2x00_load_ram(vha, req->dma, risc_addr,
  6580. dlen);
  6581. if (rval) {
  6582. ql_log(ql_log_fatal, vha, 0x0098,
  6583. "Failed to load segment %d of firmware.\n",
  6584. fragment);
  6585. return QLA_FUNCTION_FAILED;
  6586. }
  6587. fwcode += dlen;
  6588. risc_addr += dlen;
  6589. risc_size -= dlen;
  6590. fragment++;
  6591. }
  6592. /* Next segment. */
  6593. segments--;
  6594. }
  6595. if (!IS_QLA27XX(ha))
  6596. return rval;
  6597. if (ha->fw_dump_template)
  6598. vfree(ha->fw_dump_template);
  6599. ha->fw_dump_template = NULL;
  6600. ha->fw_dump_template_len = 0;
  6601. ql_dbg(ql_dbg_init, vha, 0x171,
  6602. "Loading fwdump template from %x\n",
  6603. (uint32_t)((void *)fwcode - (void *)blob->fw->data));
  6604. risc_size = be32_to_cpu(fwcode[2]);
  6605. ql_dbg(ql_dbg_init, vha, 0x172,
  6606. "-> array size %x dwords\n", risc_size);
  6607. if (risc_size == 0 || risc_size == ~0)
  6608. goto default_template;
  6609. dlen = (risc_size - 8) * sizeof(*fwcode);
  6610. ql_dbg(ql_dbg_init, vha, 0x0173,
  6611. "-> template allocating %x bytes...\n", dlen);
  6612. ha->fw_dump_template = vmalloc(dlen);
  6613. if (!ha->fw_dump_template) {
  6614. ql_log(ql_log_warn, vha, 0x0174,
  6615. "Failed fwdump template allocate %x bytes.\n", risc_size);
  6616. goto default_template;
  6617. }
  6618. fwcode += 7;
  6619. risc_size -= 8;
  6620. dcode = ha->fw_dump_template;
  6621. for (i = 0; i < risc_size; i++)
  6622. dcode[i] = le32_to_cpu(fwcode[i]);
  6623. if (!qla27xx_fwdt_template_valid(dcode)) {
  6624. ql_log(ql_log_warn, vha, 0x0175,
  6625. "Failed fwdump template validate\n");
  6626. goto default_template;
  6627. }
  6628. dlen = qla27xx_fwdt_template_size(dcode);
  6629. ql_dbg(ql_dbg_init, vha, 0x0176,
  6630. "-> template size %x bytes\n", dlen);
  6631. if (dlen > risc_size * sizeof(*fwcode)) {
  6632. ql_log(ql_log_warn, vha, 0x0177,
  6633. "Failed fwdump template exceeds array by %zx bytes\n",
  6634. (size_t)(dlen - risc_size * sizeof(*fwcode)));
  6635. goto default_template;
  6636. }
  6637. ha->fw_dump_template_len = dlen;
  6638. return rval;
  6639. default_template:
  6640. ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
  6641. if (ha->fw_dump_template)
  6642. vfree(ha->fw_dump_template);
  6643. ha->fw_dump_template = NULL;
  6644. ha->fw_dump_template_len = 0;
  6645. dlen = qla27xx_fwdt_template_default_size();
  6646. ql_dbg(ql_dbg_init, vha, 0x0179,
  6647. "-> template allocating %x bytes...\n", dlen);
  6648. ha->fw_dump_template = vmalloc(dlen);
  6649. if (!ha->fw_dump_template) {
  6650. ql_log(ql_log_warn, vha, 0x017a,
  6651. "Failed fwdump template allocate %x bytes.\n", risc_size);
  6652. goto failed_template;
  6653. }
  6654. dcode = ha->fw_dump_template;
  6655. risc_size = dlen / sizeof(*fwcode);
  6656. fwcode = qla27xx_fwdt_template_default();
  6657. for (i = 0; i < risc_size; i++)
  6658. dcode[i] = be32_to_cpu(fwcode[i]);
  6659. if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
  6660. ql_log(ql_log_warn, vha, 0x017b,
  6661. "Failed fwdump template validate\n");
  6662. goto failed_template;
  6663. }
  6664. dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
  6665. ql_dbg(ql_dbg_init, vha, 0x017c,
  6666. "-> template size %x bytes\n", dlen);
  6667. ha->fw_dump_template_len = dlen;
  6668. return rval;
  6669. failed_template:
  6670. ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
  6671. if (ha->fw_dump_template)
  6672. vfree(ha->fw_dump_template);
  6673. ha->fw_dump_template = NULL;
  6674. ha->fw_dump_template_len = 0;
  6675. return rval;
  6676. }
  6677. int
  6678. qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
  6679. {
  6680. int rval;
  6681. if (ql2xfwloadbin == 1)
  6682. return qla81xx_load_risc(vha, srisc_addr);
  6683. /*
  6684. * FW Load priority:
  6685. * 1) Firmware via request-firmware interface (.bin file).
  6686. * 2) Firmware residing in flash.
  6687. */
  6688. rval = qla24xx_load_risc_blob(vha, srisc_addr);
  6689. if (rval == QLA_SUCCESS)
  6690. return rval;
  6691. return qla24xx_load_risc_flash(vha, srisc_addr,
  6692. vha->hw->flt_region_fw);
  6693. }
  6694. int
  6695. qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
  6696. {
  6697. int rval;
  6698. struct qla_hw_data *ha = vha->hw;
  6699. if (ql2xfwloadbin == 2)
  6700. goto try_blob_fw;
  6701. /*
  6702. * FW Load priority:
  6703. * 1) Firmware residing in flash.
  6704. * 2) Firmware via request-firmware interface (.bin file).
  6705. * 3) Golden-Firmware residing in flash -- limited operation.
  6706. */
  6707. rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
  6708. if (rval == QLA_SUCCESS)
  6709. return rval;
  6710. try_blob_fw:
  6711. rval = qla24xx_load_risc_blob(vha, srisc_addr);
  6712. if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
  6713. return rval;
  6714. ql_log(ql_log_info, vha, 0x0099,
  6715. "Attempting to fallback to golden firmware.\n");
  6716. rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
  6717. if (rval != QLA_SUCCESS)
  6718. return rval;
  6719. ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
  6720. ha->flags.running_gold_fw = 1;
  6721. return rval;
  6722. }
  6723. void
  6724. qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
  6725. {
  6726. int ret, retries;
  6727. struct qla_hw_data *ha = vha->hw;
  6728. if (ha->flags.pci_channel_io_perm_failure)
  6729. return;
  6730. if (!IS_FWI2_CAPABLE(ha))
  6731. return;
  6732. if (!ha->fw_major_version)
  6733. return;
  6734. if (!ha->flags.fw_started)
  6735. return;
  6736. ret = qla2x00_stop_firmware(vha);
  6737. for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
  6738. ret != QLA_INVALID_COMMAND && retries ; retries--) {
  6739. ha->isp_ops->reset_chip(vha);
  6740. if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
  6741. continue;
  6742. if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
  6743. continue;
  6744. ql_log(ql_log_info, vha, 0x8015,
  6745. "Attempting retry of stop-firmware command.\n");
  6746. ret = qla2x00_stop_firmware(vha);
  6747. }
  6748. QLA_FW_STOPPED(ha);
  6749. ha->flags.fw_init_done = 0;
  6750. }
/**
 * qla24xx_configure_vhba() - Bring an NPIV virtual port online.
 * @vha: virtual port to configure; must not be the physical port.
 *
 * Waits for firmware readiness on the base (physical) port, issues a
 * sync marker, logs the virtual port into the fabric SNS (directory
 * server at well-known address 0xFFFFFC), and schedules a loop resync
 * so the vport discovers fabric devices.
 *
 * Return: -EINVAL when called on the physical port (vp_idx == 0),
 * QLA_FUNCTION_FAILED when SNS login fails, otherwise the status of
 * qla2x00_loop_resync().
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct req_que *req;
	struct rsp_que *rsp;

	/* Only virtual ports are configured here. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);

	/* Prefer the vport's own queue pair; else the base request queue. */
	if (vha->qpair)
		req = vha->qpair->req;
	else
		req = ha->req_q_map[0];

	rsp = req->rsp;

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	/* Fabric login succeeded: mark the loop up and request a resync. */
	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
  6796. /* 84XX Support **************************************************************/
  6797. static LIST_HEAD(qla_cs84xx_list);
  6798. static DEFINE_MUTEX(qla_cs84xx_mutex);
  6799. static struct qla_chip_state_84xx *
  6800. qla84xx_get_chip(struct scsi_qla_host *vha)
  6801. {
  6802. struct qla_chip_state_84xx *cs84xx;
  6803. struct qla_hw_data *ha = vha->hw;
  6804. mutex_lock(&qla_cs84xx_mutex);
  6805. /* Find any shared 84xx chip. */
  6806. list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
  6807. if (cs84xx->bus == ha->pdev->bus) {
  6808. kref_get(&cs84xx->kref);
  6809. goto done;
  6810. }
  6811. }
  6812. cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
  6813. if (!cs84xx)
  6814. goto done;
  6815. kref_init(&cs84xx->kref);
  6816. spin_lock_init(&cs84xx->access_lock);
  6817. mutex_init(&cs84xx->fw_update_mutex);
  6818. cs84xx->bus = ha->pdev->bus;
  6819. list_add_tail(&cs84xx->list, &qla_cs84xx_list);
  6820. done:
  6821. mutex_unlock(&qla_cs84xx_mutex);
  6822. return cs84xx;
  6823. }
  6824. static void
  6825. __qla84xx_chip_release(struct kref *kref)
  6826. {
  6827. struct qla_chip_state_84xx *cs84xx =
  6828. container_of(kref, struct qla_chip_state_84xx, kref);
  6829. mutex_lock(&qla_cs84xx_mutex);
  6830. list_del(&cs84xx->list);
  6831. mutex_unlock(&qla_cs84xx_mutex);
  6832. kfree(cs84xx);
  6833. }
  6834. void
  6835. qla84xx_put_chip(struct scsi_qla_host *vha)
  6836. {
  6837. struct qla_hw_data *ha = vha->hw;
  6838. if (ha->cs84xx)
  6839. kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
  6840. }
  6841. static int
  6842. qla84xx_init_chip(scsi_qla_host_t *vha)
  6843. {
  6844. int rval;
  6845. uint16_t status[2];
  6846. struct qla_hw_data *ha = vha->hw;
  6847. mutex_lock(&ha->cs84xx->fw_update_mutex);
  6848. rval = qla84xx_verify_chip(vha, status);
  6849. mutex_unlock(&ha->cs84xx->fw_update_mutex);
  6850. return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
  6851. QLA_SUCCESS;
  6852. }
  6853. /* 81XX Support **************************************************************/
  6854. int
  6855. qla81xx_nvram_config(scsi_qla_host_t *vha)
  6856. {
  6857. int rval;
  6858. struct init_cb_81xx *icb;
  6859. struct nvram_81xx *nv;
  6860. uint32_t *dptr;
  6861. uint8_t *dptr1, *dptr2;
  6862. uint32_t chksum;
  6863. uint16_t cnt;
  6864. struct qla_hw_data *ha = vha->hw;
  6865. rval = QLA_SUCCESS;
  6866. icb = (struct init_cb_81xx *)ha->init_cb;
  6867. nv = ha->nvram;
  6868. /* Determine NVRAM starting address. */
  6869. ha->nvram_size = sizeof(struct nvram_81xx);
  6870. ha->vpd_size = FA_NVRAM_VPD_SIZE;
  6871. if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
  6872. ha->vpd_size = FA_VPD_SIZE_82XX;
  6873. /* Get VPD data into cache */
  6874. ha->vpd = ha->nvram + VPD_OFFSET;
  6875. ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
  6876. ha->vpd_size);
  6877. /* Get NVRAM data into cache and calculate checksum. */
  6878. ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
  6879. ha->nvram_size);
  6880. dptr = (uint32_t *)nv;
  6881. for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
  6882. chksum += le32_to_cpu(*dptr);
  6883. ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
  6884. "Contents of NVRAM:\n");
  6885. ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
  6886. (uint8_t *)nv, ha->nvram_size);
  6887. /* Bad NVRAM data, set defaults parameters. */
  6888. if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
  6889. || nv->id[3] != ' ' ||
  6890. nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
  6891. /* Reset NVRAM data. */
  6892. ql_log(ql_log_info, vha, 0x0073,
  6893. "Inconsistent NVRAM detected: checksum=0x%x id=%c "
  6894. "version=0x%x.\n", chksum, nv->id[0],
  6895. le16_to_cpu(nv->nvram_version));
  6896. ql_log(ql_log_info, vha, 0x0074,
  6897. "Falling back to functioning (yet invalid -- WWPN) "
  6898. "defaults.\n");
  6899. /*
  6900. * Set default initialization control block.
  6901. */
  6902. memset(nv, 0, ha->nvram_size);
  6903. nv->nvram_version = cpu_to_le16(ICB_VERSION);
  6904. nv->version = cpu_to_le16(ICB_VERSION);
  6905. nv->frame_payload_size = 2048;
  6906. nv->execution_throttle = cpu_to_le16(0xFFFF);
  6907. nv->exchange_count = cpu_to_le16(0);
  6908. nv->port_name[0] = 0x21;
  6909. nv->port_name[1] = 0x00 + ha->port_no + 1;
  6910. nv->port_name[2] = 0x00;
  6911. nv->port_name[3] = 0xe0;
  6912. nv->port_name[4] = 0x8b;
  6913. nv->port_name[5] = 0x1c;
  6914. nv->port_name[6] = 0x55;
  6915. nv->port_name[7] = 0x86;
  6916. nv->node_name[0] = 0x20;
  6917. nv->node_name[1] = 0x00;
  6918. nv->node_name[2] = 0x00;
  6919. nv->node_name[3] = 0xe0;
  6920. nv->node_name[4] = 0x8b;
  6921. nv->node_name[5] = 0x1c;
  6922. nv->node_name[6] = 0x55;
  6923. nv->node_name[7] = 0x86;
  6924. nv->login_retry_count = cpu_to_le16(8);
  6925. nv->interrupt_delay_timer = cpu_to_le16(0);
  6926. nv->login_timeout = cpu_to_le16(0);
  6927. nv->firmware_options_1 =
  6928. cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
  6929. nv->firmware_options_2 = cpu_to_le32(2 << 4);
  6930. nv->firmware_options_2 |= cpu_to_le32(BIT_12);
  6931. nv->firmware_options_3 = cpu_to_le32(2 << 13);
  6932. nv->host_p = cpu_to_le32(BIT_11|BIT_10);
  6933. nv->efi_parameters = cpu_to_le32(0);
  6934. nv->reset_delay = 5;
  6935. nv->max_luns_per_target = cpu_to_le16(128);
  6936. nv->port_down_retry_count = cpu_to_le16(30);
  6937. nv->link_down_timeout = cpu_to_le16(180);
  6938. nv->enode_mac[0] = 0x00;
  6939. nv->enode_mac[1] = 0xC0;
  6940. nv->enode_mac[2] = 0xDD;
  6941. nv->enode_mac[3] = 0x04;
  6942. nv->enode_mac[4] = 0x05;
  6943. nv->enode_mac[5] = 0x06 + ha->port_no + 1;
  6944. rval = 1;
  6945. }
  6946. if (IS_T10_PI_CAPABLE(ha))
  6947. nv->frame_payload_size &= ~7;
  6948. qlt_81xx_config_nvram_stage1(vha, nv);
  6949. /* Reset Initialization control block */
  6950. memset(icb, 0, ha->init_cb_size);
  6951. /* Copy 1st segment. */
  6952. dptr1 = (uint8_t *)icb;
  6953. dptr2 = (uint8_t *)&nv->version;
  6954. cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
  6955. while (cnt--)
  6956. *dptr1++ = *dptr2++;
  6957. icb->login_retry_count = nv->login_retry_count;
  6958. /* Copy 2nd segment. */
  6959. dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
  6960. dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
  6961. cnt = (uint8_t *)&icb->reserved_5 -
  6962. (uint8_t *)&icb->interrupt_delay_timer;
  6963. while (cnt--)
  6964. *dptr1++ = *dptr2++;
  6965. memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
  6966. /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
  6967. if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
  6968. icb->enode_mac[0] = 0x00;
  6969. icb->enode_mac[1] = 0xC0;
  6970. icb->enode_mac[2] = 0xDD;
  6971. icb->enode_mac[3] = 0x04;
  6972. icb->enode_mac[4] = 0x05;
  6973. icb->enode_mac[5] = 0x06 + ha->port_no + 1;
  6974. }
  6975. /* Use extended-initialization control block. */
  6976. memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
  6977. ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
  6978. /*
  6979. * Setup driver NVRAM options.
  6980. */
  6981. qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
  6982. "QLE8XXX");
  6983. qlt_81xx_config_nvram_stage2(vha, icb);
  6984. /* Use alternate WWN? */
  6985. if (nv->host_p & cpu_to_le32(BIT_15)) {
  6986. memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
  6987. memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
  6988. }
  6989. /* Prepare nodename */
  6990. if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
  6991. /*
  6992. * Firmware will apply the following mask if the nodename was
  6993. * not provided.
  6994. */
  6995. memcpy(icb->node_name, icb->port_name, WWN_SIZE);
  6996. icb->node_name[0] &= 0xF0;
  6997. }
  6998. /* Set host adapter parameters. */
  6999. ha->flags.disable_risc_code_load = 0;
  7000. ha->flags.enable_lip_reset = 0;
  7001. ha->flags.enable_lip_full_login =
  7002. le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
  7003. ha->flags.enable_target_reset =
  7004. le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
  7005. ha->flags.enable_led_scheme = 0;
  7006. ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
  7007. ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
  7008. (BIT_6 | BIT_5 | BIT_4)) >> 4;
  7009. /* save HBA serial number */
  7010. ha->serial0 = icb->port_name[5];
  7011. ha->serial1 = icb->port_name[6];
  7012. ha->serial2 = icb->port_name[7];
  7013. memcpy(vha->node_name, icb->node_name, WWN_SIZE);
  7014. memcpy(vha->port_name, icb->port_name, WWN_SIZE);
  7015. icb->execution_throttle = cpu_to_le16(0xFFFF);
  7016. ha->retry_count = le16_to_cpu(nv->login_retry_count);
  7017. /* Set minimum login_timeout to 4 seconds. */
  7018. if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
  7019. nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
  7020. if (le16_to_cpu(nv->login_timeout) < 4)
  7021. nv->login_timeout = cpu_to_le16(4);
  7022. ha->login_timeout = le16_to_cpu(nv->login_timeout);
  7023. /* Set minimum RATOV to 100 tenths of a second. */
  7024. ha->r_a_tov = 100;
  7025. ha->loop_reset_delay = nv->reset_delay;
  7026. /* Link Down Timeout = 0:
  7027. *
  7028. * When Port Down timer expires we will start returning
  7029. * I/O's to OS with "DID_NO_CONNECT".
  7030. *
  7031. * Link Down Timeout != 0:
  7032. *
  7033. * The driver waits for the link to come up after link down
  7034. * before returning I/Os to OS with "DID_NO_CONNECT".
  7035. */
  7036. if (le16_to_cpu(nv->link_down_timeout) == 0) {
  7037. ha->loop_down_abort_time =
  7038. (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
  7039. } else {
  7040. ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
  7041. ha->loop_down_abort_time =
  7042. (LOOP_DOWN_TIME - ha->link_down_timeout);
  7043. }
  7044. /* Need enough time to try and get the port back. */
  7045. ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
  7046. if (qlport_down_retry)
  7047. ha->port_down_retry_count = qlport_down_retry;
  7048. /* Set login_retry_count */
  7049. ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
  7050. if (ha->port_down_retry_count ==
  7051. le16_to_cpu(nv->port_down_retry_count) &&
  7052. ha->port_down_retry_count > 3)
  7053. ha->login_retry_count = ha->port_down_retry_count;
  7054. else if (ha->port_down_retry_count > (int)ha->login_retry_count)
  7055. ha->login_retry_count = ha->port_down_retry_count;
  7056. if (ql2xloginretrycount)
  7057. ha->login_retry_count = ql2xloginretrycount;
  7058. /* if not running MSI-X we need handshaking on interrupts */
  7059. if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
  7060. icb->firmware_options_2 |= cpu_to_le32(BIT_22);
  7061. /* Enable ZIO. */
  7062. if (!vha->flags.init_done) {
  7063. ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
  7064. (BIT_3 | BIT_2 | BIT_1 | BIT_0);
  7065. ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
  7066. le16_to_cpu(icb->interrupt_delay_timer): 2;
  7067. }
  7068. icb->firmware_options_2 &= cpu_to_le32(
  7069. ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
  7070. vha->flags.process_response_queue = 0;
  7071. if (ha->zio_mode != QLA_ZIO_DISABLED) {
  7072. ha->zio_mode = QLA_ZIO_MODE_6;
  7073. ql_log(ql_log_info, vha, 0x0075,
  7074. "ZIO mode %d enabled; timer delay (%d us).\n",
  7075. ha->zio_mode,
  7076. ha->zio_timer * 100);
  7077. icb->firmware_options_2 |= cpu_to_le32(
  7078. (uint32_t)ha->zio_mode);
  7079. icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
  7080. vha->flags.process_response_queue = 1;
  7081. }
  7082. /* enable RIDA Format2 */
  7083. icb->firmware_options_3 |= BIT_0;
  7084. /* N2N: driver will initiate Login instead of FW */
  7085. icb->firmware_options_3 |= BIT_8;
  7086. if (IS_QLA27XX(ha)) {
  7087. icb->firmware_options_3 |= BIT_8;
  7088. ql_dbg(ql_log_info, vha, 0x0075,
  7089. "Enabling direct connection.\n");
  7090. }
  7091. if (rval) {
  7092. ql_log(ql_log_warn, vha, 0x0076,
  7093. "NVRAM configuration failed.\n");
  7094. }
  7095. return (rval);
  7096. }
/*
 * qla82xx_restart_isp
 *	Re-initializes the ISP82xx after a chip reset: brings the rings back
 *	up, waits for firmware readiness, re-enables interrupts, re-arms the
 *	FCE/EFT trace buffers and finally propagates the abort to every vport.
 *
 * Input:
 *	vha = scsi host structure pointer (base/physical host).
 *
 * Returns:
 *	0 on success, non-zero status from the failing init step otherwise.
 *
 * Context:
 *	Kernel context (DPC thread).
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	/* Base (queue 0) request/response queues used for the sync marker. */
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		if (ha->fce) {
			/* Re-arm the Fibre Channel Event trace buffer. */
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				/* Non-fatal: continue without FCE tracing. */
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		if (ha->eft) {
			/* Re-arm the Extended Firmware Trace buffer. */
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				/* Non-fatal: continue without EFT tracing. */
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/*
		 * Propagate the abort to every virtual port.  The vport lock
		 * must be dropped around qla2x00_vp_abort_isp(); the vport is
		 * pinned across the unlocked region via vref_count.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
/*
 * qla81xx_update_fw_options
 *	Builds the ISP81xx firmware option words from the module parameters
 *	and the current operating/target mode, then pushes them to the
 *	firmware via qla2x00_set_fw_options().
 *
 * Input:
 *	vha = scsi host structure pointer.
 *
 * Context:
 *	Kernel context.
 */
void
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2103,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (qla_tgt_mode_enabled(vha) ||
	    qla_dual_mode_enabled(vha)) {
		/* FW auto send SCSI status during */
		ha->fw_options[1] |= BIT_8;
		/* Status to report when FW queues are full (upper byte). */
		ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;

		/* FW perform Exchange validation */
		ha->fw_options[2] |= BIT_4;
	} else {
		/* Initiator-only: undo the target-mode option bits. */
		ha->fw_options[1] &= ~BIT_8;
		ha->fw_options[10] &= 0x00ff;

		ha->fw_options[2] &= ~BIT_4;
	}

	if (ql2xetsenable) {
		/* Enable ETS Burst. */
		/*
		 * NOTE(review): this memset discards every option bit set
		 * above (FLOGI retry, ATIOQ routing, target-mode bits) before
		 * enabling ETS — presumably intentional since ETS is set up
		 * in isolation, but worth confirming.
		 */
		memset(ha->fw_options, 0, sizeof(ha->fw_options));
		ha->fw_options[2] |= BIT_9;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e9,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Hand the assembled option words to the firmware. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
  7225. /*
  7226. * qla24xx_get_fcp_prio
  7227. * Gets the fcp cmd priority value for the logged in port.
  7228. * Looks for a match of the port descriptors within
  7229. * each of the fcp prio config entries. If a match is found,
  7230. * the tag (priority) value is returned.
  7231. *
  7232. * Input:
  7233. * vha = scsi host structure pointer.
  7234. * fcport = port structure pointer.
  7235. *
  7236. * Return:
  7237. * non-zero (if found)
  7238. * -1 (if not found)
  7239. *
  7240. * Context:
  7241. * Kernel context
  7242. */
  7243. static int
  7244. qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
  7245. {
  7246. int i, entries;
  7247. uint8_t pid_match, wwn_match;
  7248. int priority;
  7249. uint32_t pid1, pid2;
  7250. uint64_t wwn1, wwn2;
  7251. struct qla_fcp_prio_entry *pri_entry;
  7252. struct qla_hw_data *ha = vha->hw;
  7253. if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
  7254. return -1;
  7255. priority = -1;
  7256. entries = ha->fcp_prio_cfg->num_entries;
  7257. pri_entry = &ha->fcp_prio_cfg->entry[0];
  7258. for (i = 0; i < entries; i++) {
  7259. pid_match = wwn_match = 0;
  7260. if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
  7261. pri_entry++;
  7262. continue;
  7263. }
  7264. /* check source pid for a match */
  7265. if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
  7266. pid1 = pri_entry->src_pid & INVALID_PORT_ID;
  7267. pid2 = vha->d_id.b24 & INVALID_PORT_ID;
  7268. if (pid1 == INVALID_PORT_ID)
  7269. pid_match++;
  7270. else if (pid1 == pid2)
  7271. pid_match++;
  7272. }
  7273. /* check destination pid for a match */
  7274. if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
  7275. pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
  7276. pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
  7277. if (pid1 == INVALID_PORT_ID)
  7278. pid_match++;
  7279. else if (pid1 == pid2)
  7280. pid_match++;
  7281. }
  7282. /* check source WWN for a match */
  7283. if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
  7284. wwn1 = wwn_to_u64(vha->port_name);
  7285. wwn2 = wwn_to_u64(pri_entry->src_wwpn);
  7286. if (wwn2 == (uint64_t)-1)
  7287. wwn_match++;
  7288. else if (wwn1 == wwn2)
  7289. wwn_match++;
  7290. }
  7291. /* check destination WWN for a match */
  7292. if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
  7293. wwn1 = wwn_to_u64(fcport->port_name);
  7294. wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
  7295. if (wwn2 == (uint64_t)-1)
  7296. wwn_match++;
  7297. else if (wwn1 == wwn2)
  7298. wwn_match++;
  7299. }
  7300. if (pid_match == 2 || wwn_match == 2) {
  7301. /* Found a matching entry */
  7302. if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
  7303. priority = pri_entry->tag;
  7304. break;
  7305. }
  7306. pri_entry++;
  7307. }
  7308. return priority;
  7309. }
  7310. /*
  7311. * qla24xx_update_fcport_fcp_prio
  7312. * Activates fcp priority for the logged in fc port
  7313. *
  7314. * Input:
  7315. * vha = scsi host structure pointer.
7316. * fcport = port structure pointer.
  7317. *
  7318. * Return:
  7319. * QLA_SUCCESS or QLA_FUNCTION_FAILED
  7320. *
  7321. * Context:
  7322. * Kernel context.
  7323. */
  7324. int
  7325. qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
  7326. {
  7327. int ret;
  7328. int priority;
  7329. uint16_t mb[5];
  7330. if (fcport->port_type != FCT_TARGET ||
  7331. fcport->loop_id == FC_NO_LOOP_ID)
  7332. return QLA_FUNCTION_FAILED;
  7333. priority = qla24xx_get_fcp_prio(vha, fcport);
  7334. if (priority < 0)
  7335. return QLA_FUNCTION_FAILED;
  7336. if (IS_P3P_TYPE(vha->hw)) {
  7337. fcport->fcp_prio = priority & 0xf;
  7338. return QLA_SUCCESS;
  7339. }
  7340. ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
  7341. if (ret == QLA_SUCCESS) {
  7342. if (fcport->fcp_prio != priority)
  7343. ql_dbg(ql_dbg_user, vha, 0x709e,
  7344. "Updated FCP_CMND priority - value=%d loop_id=%d "
  7345. "port_id=%02x%02x%02x.\n", priority,
  7346. fcport->loop_id, fcport->d_id.b.domain,
  7347. fcport->d_id.b.area, fcport->d_id.b.al_pa);
  7348. fcport->fcp_prio = priority & 0xf;
  7349. } else
  7350. ql_dbg(ql_dbg_user, vha, 0x704f,
  7351. "Unable to update FCP_CMND priority - ret=0x%x for "
  7352. "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
  7353. fcport->d_id.b.domain, fcport->d_id.b.area,
  7354. fcport->d_id.b.al_pa);
  7355. return ret;
  7356. }
  7357. /*
  7358. * qla24xx_update_all_fcp_prio
  7359. * Activates fcp priority for all the logged in ports
  7360. *
  7361. * Input:
7362. * vha = scsi host structure pointer.
  7363. *
  7364. * Return:
  7365. * QLA_SUCCESS or QLA_FUNCTION_FAILED
  7366. *
  7367. * Context:
  7368. * Kernel context.
  7369. */
  7370. int
  7371. qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
  7372. {
  7373. int ret;
  7374. fc_port_t *fcport;
  7375. ret = QLA_FUNCTION_FAILED;
  7376. /* We need to set priority for all logged in ports */
  7377. list_for_each_entry(fcport, &vha->vp_fcports, list)
  7378. ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
  7379. return ret;
  7380. }
  7381. struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
  7382. int vp_idx, bool startqp)
  7383. {
  7384. int rsp_id = 0;
  7385. int req_id = 0;
  7386. int i;
  7387. struct qla_hw_data *ha = vha->hw;
  7388. uint16_t qpair_id = 0;
  7389. struct qla_qpair *qpair = NULL;
  7390. struct qla_msix_entry *msix;
  7391. if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
  7392. ql_log(ql_log_warn, vha, 0x00181,
  7393. "FW/Driver is not multi-queue capable.\n");
  7394. return NULL;
  7395. }
  7396. if (ql2xmqsupport || ql2xnvmeenable) {
  7397. qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
  7398. if (qpair == NULL) {
  7399. ql_log(ql_log_warn, vha, 0x0182,
  7400. "Failed to allocate memory for queue pair.\n");
  7401. return NULL;
  7402. }
  7403. memset(qpair, 0, sizeof(struct qla_qpair));
  7404. qpair->hw = vha->hw;
  7405. qpair->vha = vha;
  7406. qpair->qp_lock_ptr = &qpair->qp_lock;
  7407. spin_lock_init(&qpair->qp_lock);
  7408. qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
  7409. /* Assign available que pair id */
  7410. mutex_lock(&ha->mq_lock);
  7411. qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
  7412. if (ha->num_qpairs >= ha->max_qpairs) {
  7413. mutex_unlock(&ha->mq_lock);
  7414. ql_log(ql_log_warn, vha, 0x0183,
  7415. "No resources to create additional q pair.\n");
  7416. goto fail_qid_map;
  7417. }
  7418. ha->num_qpairs++;
  7419. set_bit(qpair_id, ha->qpair_qid_map);
  7420. ha->queue_pair_map[qpair_id] = qpair;
  7421. qpair->id = qpair_id;
  7422. qpair->vp_idx = vp_idx;
  7423. qpair->fw_started = ha->flags.fw_started;
  7424. INIT_LIST_HEAD(&qpair->hints_list);
  7425. qpair->chip_reset = ha->base_qpair->chip_reset;
  7426. qpair->enable_class_2 = ha->base_qpair->enable_class_2;
  7427. qpair->enable_explicit_conf =
  7428. ha->base_qpair->enable_explicit_conf;
  7429. for (i = 0; i < ha->msix_count; i++) {
  7430. msix = &ha->msix_entries[i];
  7431. if (msix->in_use)
  7432. continue;
  7433. qpair->msix = msix;
  7434. ql_dbg(ql_dbg_multiq, vha, 0xc00f,
  7435. "Vector %x selected for qpair\n", msix->vector);
  7436. break;
  7437. }
  7438. if (!qpair->msix) {
  7439. ql_log(ql_log_warn, vha, 0x0184,
  7440. "Out of MSI-X vectors!.\n");
  7441. goto fail_msix;
  7442. }
  7443. qpair->msix->in_use = 1;
  7444. list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
  7445. qpair->pdev = ha->pdev;
  7446. if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
  7447. qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
  7448. mutex_unlock(&ha->mq_lock);
  7449. /* Create response queue first */
  7450. rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
  7451. if (!rsp_id) {
  7452. ql_log(ql_log_warn, vha, 0x0185,
  7453. "Failed to create response queue.\n");
  7454. goto fail_rsp;
  7455. }
  7456. qpair->rsp = ha->rsp_q_map[rsp_id];
  7457. /* Create request queue */
  7458. req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
  7459. startqp);
  7460. if (!req_id) {
  7461. ql_log(ql_log_warn, vha, 0x0186,
  7462. "Failed to create request queue.\n");
  7463. goto fail_req;
  7464. }
  7465. qpair->req = ha->req_q_map[req_id];
  7466. qpair->rsp->req = qpair->req;
  7467. qpair->rsp->qpair = qpair;
  7468. /* init qpair to this cpu. Will adjust at run time. */
  7469. qla_cpu_update(qpair, smp_processor_id());
  7470. if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
  7471. if (ha->fw_attributes & BIT_4)
  7472. qpair->difdix_supported = 1;
  7473. }
  7474. qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
  7475. if (!qpair->srb_mempool) {
  7476. ql_log(ql_log_warn, vha, 0xd036,
  7477. "Failed to create srb mempool for qpair %d\n",
  7478. qpair->id);
  7479. goto fail_mempool;
  7480. }
  7481. /* Mark as online */
  7482. qpair->online = 1;
  7483. if (!vha->flags.qpairs_available)
  7484. vha->flags.qpairs_available = 1;
  7485. ql_dbg(ql_dbg_multiq, vha, 0xc00d,
  7486. "Request/Response queue pair created, id %d\n",
  7487. qpair->id);
  7488. ql_dbg(ql_dbg_init, vha, 0x0187,
  7489. "Request/Response queue pair created, id %d\n",
  7490. qpair->id);
  7491. }
  7492. return qpair;
  7493. fail_mempool:
  7494. fail_req:
  7495. qla25xx_delete_rsp_que(vha, qpair->rsp);
  7496. fail_rsp:
  7497. mutex_lock(&ha->mq_lock);
  7498. qpair->msix->in_use = 0;
  7499. list_del(&qpair->qp_list_elem);
  7500. if (list_empty(&vha->qp_list))
  7501. vha->flags.qpairs_available = 0;
  7502. fail_msix:
  7503. ha->queue_pair_map[qpair_id] = NULL;
  7504. clear_bit(qpair_id, ha->qpair_qid_map);
  7505. ha->num_qpairs--;
  7506. mutex_unlock(&ha->mq_lock);
  7507. fail_qid_map:
  7508. kfree(qpair);
  7509. return NULL;
  7510. }
  7511. int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
  7512. {
  7513. int ret = QLA_FUNCTION_FAILED;
  7514. struct qla_hw_data *ha = qpair->hw;
  7515. qpair->delete_in_progress = 1;
  7516. ret = qla25xx_delete_req_que(vha, qpair->req);
  7517. if (ret != QLA_SUCCESS)
  7518. goto fail;
  7519. ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
  7520. if (ret != QLA_SUCCESS)
  7521. goto fail;
  7522. mutex_lock(&ha->mq_lock);
  7523. ha->queue_pair_map[qpair->id] = NULL;
  7524. clear_bit(qpair->id, ha->qpair_qid_map);
  7525. ha->num_qpairs--;
  7526. list_del(&qpair->qp_list_elem);
  7527. if (list_empty(&vha->qp_list)) {
  7528. vha->flags.qpairs_available = 0;
  7529. vha->flags.qpairs_req_created = 0;
  7530. vha->flags.qpairs_rsp_created = 0;
  7531. }
  7532. mempool_destroy(qpair->srb_mempool);
  7533. kfree(qpair);
  7534. mutex_unlock(&ha->mq_lock);
  7535. return QLA_SUCCESS;
  7536. fail:
  7537. return ret;
  7538. }