hpsa.c
/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2016 Microsemi Corporation
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/*
 * HPSA_DRIVER_VERSION must be three byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.20-125"
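/*
 * Illustrative note (added for clarity, not in the original source):
 * the version string above, "3.4.20-125", encodes the byte values
 * 3, 4, and 20, with an optional trailing build byte of 125.
 */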
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");

static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};
MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
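/*
 * Worked example (added for clarity, not in the original source): a
 * board_id packs the PCI subsystem device ID into the high 16 bits and
 * the subsystem vendor ID into the low 16 bits, so subsystem
 * 0x103C:0x3241 from the PCI table above becomes board_id 0x3241103C,
 * the "Smart Array P212" entry.
 */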
static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
	struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
	bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[],
	int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
	struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr);
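/*
 * Reader's note (added for clarity, not in the original source): the
 * driver stores a pointer to its ctlr_info in the Scsi_Host's private
 * hostdata area, and the two helpers below recover that pointer from a
 * scsi_device or a Scsi_Host via shost_priv().
 */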
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data.
 * -1 (0xff, since the outputs are u8) means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
	u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on
		 * the external target (array) devices.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
{
	device->offload_enabled = 0;
	device->offload_to_be_enabled = 0;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/*
	 * Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}
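
/*
 * Illustrative example of the three predicates above (comment only, not
 * compiled): board_id 0x3223103C (Smart Array P800) appears in
 * unresettable_controller[] but not in soft_unresettable_controller[],
 * so ctlr_is_hard_resettable() returns 0, ctlr_is_soft_resettable()
 * returns 1, and ctlr_is_resettable() still reports the board as
 * resettable via the soft reset path.
 */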
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
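
/*
 * Descriptive note on the test above: it keys off the two high bits of
 * byte 3 of the 8-byte LUN address, where 01b marks the logical-volume
 * addressing mode.  For example (illustrative), an address with
 * scsi3addr[3] == 0x40 is treated as a logical device, while 0x00 or
 * 0xC0 in that byte would not be.
 */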
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);

	if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
		return snprintf(buf, 20, "%d\n", offload_enabled);
	else
		return snprintf(buf, 40, "%s\n",
				"Not applicable for a controller");
}
#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}
static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}

static ssize_t host_show_legacy_board(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}
static DEVICE_ATTR_RO(raid_level);
static DEVICE_ATTR_RO(lunid);
static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static DEVICE_ATTR(legacy_board, S_IRUGO,
	host_show_legacy_board, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	&dev_attr_ctlr_num,
	&dev_attr_legacy_board,
	NULL,
};
#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_DRIVER +\
				 HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 2048,
	.no_write_same		= 1,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
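
/*
 * Reply-queue consumption sketch for next_command() above (descriptive
 * comment only): the controller writes completed tags into rq->head[]
 * with the low bit carrying a toggle/parity value, and rq->wraparound
 * tracks the parity expected for the current pass over the ring.  Each
 * time current_entry walks off the end of the ring (h->max_commands
 * entries) it resets to 0 and the expected parity flips, so a stale
 * entry left over from the previous pass (low bit != wraparound) reads
 * as FIFO_EMPTY rather than as a new completion.
 */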
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		c->Header.ReplyQueue = reply_queue;
	}
}
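
/*
 * Worked example for set_performant_mode() above (illustrative values
 * only): with c->Header.SGList == 3 and h->blockFetchTable[3] == 2, the
 * tag becomes busaddr |= 1 | (2 << 1), i.e. bit 0 flags performant mode
 * and bits 1-3 carry the block fetch table entry, matching the bit
 * layout described in the comment block above.  The reply queue is only
 * written into the command header when MSI-X vectors are in use.
 */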
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
					struct CommandList *c,
					int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
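
/*
 * Descriptive note: the two helpers above pair an atomic_inc() at submit
 * time with an atomic_dec_and_test() at completion time, so the relaxed
 * 240-second heartbeat sample interval stays in force while any firmware
 * flash command is still outstanding, and the normal 30-second interval
 * is restored only when the last such command completes.
 */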
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);

	reply_queue = h->reply_map[raw_smp_processor_id()];
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/*
	 * finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
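
/*
 * Example of the allocation scheme above (illustrative numbers only): if
 * targets 0, 1 and 3 on the requested bus are already taken, the bitmap
 * has bits 0, 1 and 3 set, find_first_zero_bit() yields 2, and the new
 * physical device is assigned target 2, lun 0.  The function returns 0
 * on success and 1 when all HPSA_MAX_DEVICES target slots are exhausted.
 */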
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_to_be_enabled ? '+' : '-',
			dev->expose_device);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/*
	 * If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/*
	 * This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, except bytes 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in bytes 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	return 0;
}
/*
 * Called during a scan operation.
 *
 * Update an entry in h->dev[] array.
 */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/*
	 * ioaccel_handle may have changed for a dual domain disk
	 */
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  If raid map data has changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->offload_to_be_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;

	/*
	 * turn ioaccel off immediately if told to do so.
	 */
	if (!new_entry->offload_to_be_enabled)
		h->dev[entry]->offload_enabled = 0;

	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;

	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
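
/*
 * Descriptive note: SCSI3ADDR_EQ(a, b) is an unrolled byte-wise equality
 * test over the 8-byte LUN address, i.e. it evaluates to the same truth
 * value as memcmp(a, b, 8) == 0.
 */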
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/*
	 * called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/*
	 * we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/*
	 * Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	/*
	 * This can happen for dual domain devices.  An active
	 * path change causes the ioaccel handle to change
	 *
	 * for example note the handle differences between p0 and p1
	 * Device                    WWN               ,WWN hash,Handle
	 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
	 *	p1                   0x5000C5005FC4DAC9,0x6798C0,0x00040004
	 */
	if (dev1->ioaccel_handle != dev2->ioaccel_handle)
		return 1;
	return 0;
}

/*
 * Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return;

	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded.  In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present.  And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			dev_warn(&h->pdev->dev,
				"%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
				__func__,
				h->scsi_host->host_no, logical_drive->bus,
				logical_drive->target, logical_drive->lun);
			hpsa_turn_off_ioaccel_for_device(logical_drive);
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else {
		if (logical_drive->external)
			logical_drive->queue_depth = EXTERNAL_QD;
		else
			logical_drive->queue_depth = h->nr_cmds;
	}
}
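
/*
 * Worked example of the queue-depth aggregation above (illustrative
 * numbers only): a logical drive whose RAID map references 4 member
 * disks, each with queue_depth 16, accumulates qdepth = 64, clamped to
 * h->nr_cmds by the min() in the inner loop.  As the in-line comment
 * notes, that bound is right for reads but generous for full- and
 * partial-stripe writes.
 */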
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing,
		 * because we would be changing ioaccel phys_disk[] pointers
		 * on an ioaccel volume that is processing I/O requests.
		 *
		 * If an ioaccel volume status changed, initially because it was
		 * re-configured and thus underwent a transformation, or
		 * a drive failed, we would have received a state change
		 * request and ioaccel should have been turned off.  When the
		 * transformation completes, we get another state change
		 * request to turn ioaccel back on.  In this case, we need
		 * to update the ioaccel information.
		 *
		 * Thus: If it is not currently enabled, but will be after
		 * the scan completes, make sure the ioaccel pointers
		 * are up to date.
		 */
		if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
			hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}

static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *dev)
{
	int i;
	int count = 0;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
				dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	return count;
}

static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *device)
{
	int cmds = 0;
	int waits = 0;

	while (1) {
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
		if (cmds == 0)
			break;
		if (++waits > 20)
			break;
		msleep(1000);
	}

	if (waits > 20)
		dev_warn(&h->pdev->dev,
			"%s: removing device with %d outstanding commands!\n",
			__func__, cmds);
}

static void hpsa_remove_device(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	/*
	 * Allow for commands to drain
	 */
	device->removed = 1;
	hpsa_wait_for_outstanding_commands_for_dev(h, device);

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else { /* HBA */
		hpsa_remove_sas_device(device);
	}
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/*
	 * sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;

	/*
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
	 */
	spin_lock_irqsave(&h->reset_lock, flags);
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);

	added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
	removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/*
	 * find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/*
			 * Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/*
	 * Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/*
		 * Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/*
	 * Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 *
	 * The raid map should be current by now.
	 *
	 * We are updating the device list used for I/O requests.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/*
	 * Monitor devices which are in one of several NOT READY states to be
	 * brought online later.  This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/*
	 * Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes); scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device)
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc = 0;

		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		rc = hpsa_add_device(h, added[i]);
		if (!rc)
			continue;
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
		/*
		 * now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd = NULL;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
		struct scsi_target *starget;
		struct sas_rphy *rphy;

		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		sd = hpsa_find_device_by_sas_rphy(h, rphy);
		if (sd) {
			sd->target = sdev_id(sdev);
			sd->lun = sdev->lun;
		}
	}
	if (!sd)
		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
					sdev_id(sdev), sdev->lun);

	if (sd && sd->expose_device) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}
/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd) {
		if (sd->external)
			queue_depth = EXTERNAL_QD;
		else
			queue_depth = sd->queue_depth != 0 ?
					sd->queue_depth : sdev->host->can_queue;
	} else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
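
/*
 * Per-command scatter-gather chain blocks.
 *
 * A command that needs more SG entries than fit in the command structure
 * itself uses a preallocated "chain block" (one per command, h->nr_cmds
 * of them).  The allocators below build those arrays for the ioaccel2
 * path and for the standard path respectively; the hpsa_map_*/hpsa_unmap_*
 * helpers further down DMA-map a chain block into a command when it is
 * actually needed.
 */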
static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}

static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
					GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc_array(h->maxsgentries,
				      sizeof(*h->ioaccel2_cmd_sg_list[i]),
				      GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
				 GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;

	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
						  sizeof(*h->cmd_sg_list[i]),
						  GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
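
/*
 * How chaining is wired up (illustrative sketch of what the mapping
 * helpers below do, not a new mechanism): the last SG descriptor
 * embedded in the command is turned into a pointer to the external
 * chain block.  For the standard path, roughly:
 *
 *	SG[0] .. SG[max_cmd_sg_entries - 2]   -> data segments
 *	SG[max_cmd_sg_entries - 1]            -> Ext = HPSA_SG_CHAIN,
 *	                                         Addr/Len = chain block
 *
 * The chain block then holds the remaining
 * (SGTotal - max_cmd_sg_entries) descriptors, which is exactly the
 * length DMA-mapped by hpsa_map_sg_chain_block().
 */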
static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->sg[0].length);
	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->sg[0].length);
	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}
/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2,
					struct hpsa_scsi_dev_t *dev)
{
	int data_len;
	int retry = 0;
	u32 ioaccel2_resid = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			if (cmd)
				cmd->result = 0;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);		/* host byte */
			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
			ioaccel2_resid = get_unaligned_le32(
						&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/*
			 * Did an HBA disk disappear?  We will eventually
			 * get a state change event from the controller but
			 * in the meantime, we need to tell the OS that the
			 * HBA disk is no longer there and stop I/O
			 * from going down.  This allows the potential
			 * re-insert of the disk to get the same device node.
			 */
			if (dev->physical_device && dev->expose_device) {
				cmd->result = DID_NO_CONNECT << 16;
				dev->removed = 1;
				h->drv_req_rescan = 1;
				dev_warn(&h->pdev->dev,
					"%s: device is gone!\n", __func__);
			} else
				/*
				 * Retry by sending down the RAID path.
				 * We will get an event from ctlr to
				 * trigger rescan regardless.
				 */
				retry = 1;
			break;
		default:
			retry = 1;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}
static void hpsa_cmd_resolve_events(struct ctlr_info *h,
		struct CommandList *c)
{
	bool do_wake = false;

	/*
	 * Reset c->scsi_cmd here so that the reset handler will know
	 * this command has completed.  Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (c->reset_pending) {
		unsigned long flags;
		struct hpsa_scsi_dev_t *dev;

		/*
		 * There appears to be a reset pending; lock the lock and
		 * reconfirm.  If so, then decrement the count of outstanding
		 * commands and wake the reset command if this is the last one.
		 */
		spin_lock_irqsave(&h->lock, flags);
		dev = c->reset_pending;		/* Re-fetch under the lock. */
		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
			do_wake = true;
		c->reset_pending = NULL;
		spin_unlock_irqrestore(&h->lock, flags);
	}

	if (do_wake)
		wake_up_all(&h->event_sync_wait_queue);
}

static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
				      struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	if (cmd && cmd->scsi_done)
		cmd->scsi_done(cmd);
}

static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}
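
/*
 * Completion handling for ioaccel2 commands.  The general policy: any
 * error on the accelerated path for a logical device is retried down
 * the normal RAID path (via hpsa_retry_cmd()) so the controller can
 * sort out whatever went wrong; only clean completions, and errors
 * that handle_ioaccel_mode2_error() maps to a final status, complete
 * here.
 */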
static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd->result = 0;
		return hpsa_cmd_free_and_done(h, c, cmd);
	}

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever is
	 * wrong.
	 */
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
			hpsa_turn_off_ioaccel_for_device(dev);
		}

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}
/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
					struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
			tmf_status);
		break;
	}
	return -tmf_status;
}
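
/*
 * Main command completion path.  Unmaps DMA, accounts for outstanding
 * ioaccel commands, handles the controller-lockup and pending-reset
 * special cases, then translates the controller's CommandStatus into
 * a SCSI midlayer result before completing the command.
 */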
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;		/* additional sense code */
	u8 ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;

	if (!cmd->device) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
		if (dev->physical_device && dev->expose_device &&
			dev->removed) {
			cmd->result = DID_NO_CONNECT << 16;
			return hpsa_cmd_free_and_done(h, cp, cmd);
		}
		if (likely(cp->phys_disk != NULL))
			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
	}

	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	if (unlikely(hpsa_is_pending_event(cp)))
		if (cp->reset_pending)
			return hpsa_cmd_free_and_done(h, cp, cmd);

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];

		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_device(dev)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			return hpsa_retry_cmd(h, cp);
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
		if (ei->ScsiStatus)
			decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);
			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target.
		 */
		cmd->result = DID_NO_CONNECT << 16;
		}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
			cp->Request.CDB);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
			cp->Request.CDB);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
			cp->Request.CDB);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
			cp->Request.CDB);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
			cp->Request.CDB);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
			cp->Request.CDB);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_TMF_STATUS:
		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
			cmd->result = DID_ERROR << 16;
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above.  Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}

	return hpsa_cmd_free_and_done(h, cp, cmd);
}
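
/*
 * Simple one-buffer DMA helpers used by the internally generated
 * (non-midlayer) commands below: hpsa_map_one() maps a single buffer
 * into SG[0], and hpsa_pci_unmap() undoes up to sg_used such mappings.
 */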
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}

static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}
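
/*
 * Typical synchronous command pattern used by the helpers and callers
 * below (illustrative sketch only; <opcode> is a placeholder -- see
 * e.g. hpsa_scsi_do_inquiry() for a real instance):
 *
 *	c = cmd_alloc(h);
 *	if (fill_cmd(c, <opcode>, h, buf, bufsize, page, scsi3addr,
 *			TYPE_CMD))
 *		goto out;
 *	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
 *			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
 *	if (!rc)
 *		...inspect c->err_info...;
 * out:
 *	cmd_free(h, c);
 */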
#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}

	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}

static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
	int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}
static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}
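
/*
 * Retry policy for driver-initiated commands: on UNIT ATTENTION or
 * BUSY the command is reissued, up to MAX_DRIVER_CMD_RETRIES times.
 * After the third attempt a sleep is inserted before the next issue;
 * the sleep starts at 10 ms and doubles after each use until it
 * passes one second (10, 20, 40, ..., 640, 1280, 1280, ... ms).
 */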
#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}
static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
				struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
		 txt, lun, cdb);
}
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
			struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}
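
/*
 * Issue RECEIVE DIAGNOSTIC RESULTS for the given page into buf.  Used
 * below to pull the enclosure logical identifier out of the returned
 * diagnostic page (eight big-endian bytes starting at offset 12).
 */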
static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
					u8 page, u8 *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
						u8 *scsi3addr)
{
	u8 *buf;
	u64 sa = 0;
	int rc = 0;

	buf = kzalloc(1024, GFP_KERNEL);
	if (!buf)
		return 0;

	rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
					buf, 1024);
	if (rc)
		goto out;

	sa = get_unaligned_be64(buf + 12);

out:
	kfree(buf);
	return sa;
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
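
/*
 * Decide whether an in-flight command is addressed to the given
 * device, by cmd_type: for CMD_SCSI/CMD_IOCTL_PEND compare the LUN
 * address bytes; for the ioaccel types compare against the physical
 * disk(s) backing the device.  Used by hpsa_do_reset() to count the
 * commands a reset has to wait for.
 */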
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus; */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:		/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}
static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
{
	int i;
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
			unsigned long flags;

			/*
			 * Mark the target command as having a reset pending,
			 * then take the lock so that the command cannot
			 * complete while we're considering it.  If the
			 * command is not idle then count it; otherwise
			 * revoke the event.
			 */
			c->reset_pending = dev;
			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				atomic_inc(&dev->reset_cmds_out);
			else
				c->reset_pending = NULL;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
	if (!rc)
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->reset_cmds_out) == 0 ||
			lockup_detected(h));

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (unlikely(rc))
		atomic_set(&dev->reset_cmds_out, 0);
	else
		rc = wait_for_device_to_become_ready(h, scsi3addr, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	if (!hpsa_vpd_page_supported(h, scsi3addr,
		HPSA_VPD_LV_DEVICE_GEOMETRY))
		goto exit;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);

	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
exit:
	kfree(buf);
}
#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif
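
/*
 * Fetch the RAID map for a logical volume into this_device->raid_map.
 * The map is what lets the driver route ioaccel I/O directly to member
 * disks; a map larger than the statically sized buffer is rejected.
 */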
static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_sense_subsystem_info *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	/* The 16-bit BMIC device index is split across the CDB:
	 * low byte in CDB[2], high byte in CDB[9].
	 */
	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
				PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_id_controller(struct ctlr_info *h,
	struct bmic_identify_controller *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
/*
 * get enclosure information
 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 * Uses id_physical_device to determine the box_index.
 */
static void hpsa_get_enclosure_info(struct ctlr_info *h,
			unsigned char *scsi3addr,
			struct ReportExtendedLUNdata *rlep, int rle_index,
			struct hpsa_scsi_dev_t *encl_dev)
{
	int rc = -1;
	struct CommandList *c = NULL;
	struct ErrorInfo *ei = NULL;
	struct bmic_sense_storage_box_params *bssbp = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
	u16 bmic_device_index = 0;

	encl_dev->eli =
		hpsa_get_enclosure_logical_identifier(h, scsi3addr);

	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);

	if (encl_dev->target == -1 || encl_dev->lun == -1) {
		rc = IO_OK;
		goto out;
	}

	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
		rc = IO_OK;
		goto out;
	}

	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
	if (!bssbp)
		goto out;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		goto out;

	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
						id_phys, sizeof(*id_phys));
	if (rc) {
		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, encl_dev->external, bmic_device_index);
		goto out;
	}

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	if (id_phys->phys_connector[1] == 'E')
		c->Request.CDB[5] = id_phys->box_index;
	else
		c->Request.CDB[5] = 0;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;

	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		rc = -1;
		goto out;
	}

	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
		bssbp->phys_connector, sizeof(bssbp->phys_connector));

	rc = IO_OK;
out:
	kfree(bssbp);
	kfree(id_phys);

	if (c)
		cmd_free(h, c);

	if (rc != IO_OK)
		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
			"Error, could not get enclosure information");
}
static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
						unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physdev;
	u32 nphysicals;
	u64 sa = 0;
	int i;

	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
	if (!physdev)
		return 0;

	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		kfree(physdev);
		return 0;
	}
	/* Each extended report LUN entry is 24 bytes. */
	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;

	for (i = 0; i < nphysicals; i++)
		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
			break;
		}

	kfree(physdev);

	return sa;
}
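
/*
 * Fill in dev->sas_address: for the controller LUN itself, ask BMIC
 * for the subsystem information (caching the result in
 * h->sas_address); for other devices, look the WWID up in the
 * extended report of physical LUNs.
 */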
static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
					struct hpsa_scsi_dev_t *dev)
{
	int rc;
	u64 sa = 0;

	if (is_hba_lunid(scsi3addr)) {
		struct bmic_sense_subsystem_info *ssi;

		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
		if (!ssi)
			return;

		rc = hpsa_bmic_sense_subsystem_information(h,
					scsi3addr, 0, ssi, sizeof(*ssi));
		if (rc == 0) {
			sa = get_unaligned_be64(ssi->primary_world_wide_id);
			h->sas_address = sa;
		}

		kfree(ssi);
	} else
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);

	dev->sas_address = sa;
}

static void hpsa_ext_ctrl_present(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev)
{
	u32 nphysicals;
	int i;

	if (h->discovery_polling)
		return;

	nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;

	for (i = 0; i < nphysicals; i++) {
		if (physdev->LUN[i].device_type ==
			BMIC_DEVICE_TYPE_CONTROLLER
			&& !is_hba_lunid(physdev->LUN[i].lunid)) {
			dev_info(&h->pdev->dev,
				"External controller present, activate discovery polling and disable rld caching\n");
			hpsa_disable_rld_caching(h);
			h->discovery_polling = 1;
			break;
		}
	}
}
/*
 * Determine whether a device supports a given VPD page by fetching the
 * list of supported VPD pages (VPD page 0x00) and scanning it.
 */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return false;
exit_supported:
	kfree(buf);
	return true;
}
/*
 * Called during a scan operation.
 * Sets ioaccel status on the new device list, not the existing device list.
 *
 * The device list used during I/O will be updated later in
 * adjust_hpsa_scsi_table.
 */
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		bool offload_enabled =
				!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		/*
		 * Check to see if offload can be enabled.
		 */
		if (offload_enabled) {
			rc = hpsa_get_raid_map(h, scsi3addr, this_device);
			if (rc) /* could not load raid_map */
				goto out;
			this_device->offload_to_be_enabled = 1;
		}
	}

out:
	kfree(buf);
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int index, int buflen)
{
	int rc;
	unsigned char *buf;

	/* Does controller have VPD for device id? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
		return 1; /* not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
					HPSA_VPD_LV_DEVICE_ID, buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[8], buflen);
	}

	kfree(buf);

	return rc; /* 0 - got id, otherwise, didn't */
}
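
/*
 * Core REPORT LUNS helper: issues HPSA_REPORT_LOG or HPSA_REPORT_PHYS
 * to the controller LUN, optionally requesting an extended-response
 * format via CDB[1], and verifies that the controller answered in the
 * format that was requested (legacy boards get -EOPNOTSUPP so the
 * caller can fall back to the 8-byte format).
 */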
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -EAGAIN;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -EIO;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			if (!h->legacy_board) {
				dev_err(&h->pdev->dev,
					"report luns requested format %u, got %u\n",
					extended_response,
					rld->extended_response_flag);
				rc = -EINVAL;
			} else
				rc = -EOPNOTSUPP;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	int rc;
	struct ReportLUNdata *lbuf;

	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
				      HPSA_REPORT_PHYS_EXTENDED);
	if (!rc || rc != -EOPNOTSUPP)
		return rc;

	/* REPORT PHYS EXTENDED is not supported */
	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
	if (!lbuf)
		return -ENOMEM;

	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
	if (!rc) {
		int i;
		u32 nphys;

		/* Copy ReportLUNdata header */
		memcpy(buf, lbuf, 8);
		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
		for (i = 0; i < nphys; i++)
			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
	}
	kfree(lbuf);
	return rc;
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static unsigned char hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_FAILED:
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return HPSA_LV_OK;
}
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;
	int rc = 0;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff) {
		rc = -ENOMEM;
		goto bail_out;
	}

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		dev_err(&h->pdev->dev,
			"%s: inquiry failed, device will be skipped.\n",
			__func__);
		rc = HPSA_INQUIRY_FAILED;
		goto bail_out;
	}

	scsi_sanitize_inquiry_string(&inq_buff[8], 8);
	scsi_sanitize_inquiry_string(&inq_buff[16], 16);

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	this_device->rev = inq_buff[2];
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
		sizeof(this_device->device_id)) < 0)
		dev_err(&h->pdev->dev,
			"hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
			h->ctlr, __func__,
			h->scsi_host->host_no,
			this_device->target, this_device->lun,
			scsi_device_type(this_device->devtype),
			this_device->model);

	if ((this_device->devtype == TYPE_DISK ||
		this_device->devtype == TYPE_ZBC) &&
		is_logical_dev_addr_mode(scsi3addr)) {
		unsigned char volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
		    h->legacy_board) {
			/*
			 * Legacy boards might not support volume status
			 */
			dev_info(&h->pdev->dev,
				 "C0:T%d:L%d Volume status not available, assuming online.\n",
				 this_device->target, this_device->lun);
			volume_offline = 0;
		}
		this_device->volume_offline = volume_offline;
		if (volume_offline == HPSA_LV_FAILED) {
			rc = HPSA_LV_FAILED;
			dev_err(&h->pdev->dev,
				"%s: LV failed, device will be skipped.\n",
				__func__);
			goto bail_out;
		}
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		hpsa_turn_off_ioaccel_for_device(this_device);
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (this_device->external)
		this_device->queue_depth = EXTERNAL_QD;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return rc;
}

/*
 * Helper function to assign bus, target, lun mapping of devices.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = get_unaligned_le32(lunaddrbytes);

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes)) {
			int bus = HPSA_HBA_BUS;

			if (!device->rev)
				bus = HPSA_LEGACY_HBA_BUS;
			hpsa_set_bus_target_lun(device,
					bus, 0, lunid & 0x3fff);
		} else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device,
					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
		return;
	}
	/* It's a logical device */
	if (device->external) {
		hpsa_set_bus_target_lun(device,
			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
			lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
				0, lunid & 0x3fff);
}

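/*
 * Decide whether entry i in the combined device list is an external
 * logical volume.  Illustrative example: with raid_ctlr_position == 0,
 * nphysicals == 4 and nlocal_logicals == 2, index 0 is the RAID
 * controller, indices 1-4 are physical drives, 5-6 are local logical
 * drives, and anything from index 7 on is external.
 */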
static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlocal_logicals)
{
	/* In report logicals, local logicals are listed first,
	 * then any externals.
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return 0;

	if (i < logicals_start)
		return 0;

	/* i is in logicals range, but still within local logicals */
	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
		return 0;

	return 1; /* it's an external lun */
}

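/*
 * The divisors applied to LUNListLength in hpsa_gather_lun_info() below
 * reflect the on-wire entry sizes: extended physical-LUN entries are
 * 24 bytes each, plain logical-LUN entries 8 bytes each.
 */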
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

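/*
 * figure_lunaddrbytes() uses the same index layout as
 * figure_external_status() above: when the controller occupies slot 0,
 * every physical and logical index is shifted up by one, which is what
 * the "(raid_ctlr_position == 0)" terms compensate for.
 */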
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle;

	rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
}

static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}

/* get number of local logical disks. */
static int hpsa_set_local_logical_count(struct ctlr_info *h,
	struct bmic_identify_controller *id_ctlr,
	u32 *nlocals)
{
	int rc;

	if (!id_ctlr) {
		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
			__func__);
		return -ENOMEM;
	}
	memset(id_ctlr, 0, sizeof(*id_ctlr));
	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
	if (!rc)
		if (id_ctlr->configured_logical_drive_count < 255)
			*nlocals = id_ctlr->configured_logical_drive_count;
		else
			*nlocals = le16_to_cpu(
					id_ctlr->extended_logical_unit_count);
	else
		*nlocals = -1;
	return rc;
}

static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
{
	struct bmic_identify_physical_device *id_phys;
	bool is_spare = false;
	int rc;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		return false;

	rc = hpsa_bmic_id_physical_device(h,
					lunaddrbytes,
					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
					id_phys, sizeof(*id_phys));
	if (rc == 0)
		is_spare = (id_phys->more_flags >> 6) & 0x01;

	kfree(id_phys);
	return is_spare;
}

#define RPL_DEV_FLAG_NON_DISK                           0x1
#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
#define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4

#define BMIC_DEVICE_TYPE_ENCLOSURE  6

static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
	struct ext_report_lun_entry *rle)
{
	u8 device_flags;
	u8 device_type;

	if (!MASKED_DEVICE(lunaddrbytes))
		return false;

	device_flags = rle->device_flags;
	device_type = rle->device_type;

	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
			return false;
		return true;
	}

	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
		return false;

	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
		return false;

	/*
	 * Spares may be spun down, we do not want to
	 * do an Inquiry to a RAID set spare drive as
	 * that would have them spun up, that is a
	 * performance hit because I/O to the RAID device
	 * stops while the spin up occurs which can take
	 * over 50 seconds.
	 */
	if (hpsa_is_disk_spare(h, lunaddrbytes))
		return true;

	return false;
}

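/*
 * hpsa_update_scsi_devices() below re-enumerates in three passes:
 * gather the physical and logical LUN lists, allocate one device
 * structure per possible entry, then walk the combined list
 * classifying each device by type before handing the finished table
 * to adjust_hpsa_scsi_table().
 */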
static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct bmic_identify_controller *id_ctlr = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 nlocal_logicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	bool physical_device;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys || !id_ctlr) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* Set number of local logicals (non PTRAID) */
	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
		dev_warn(&h->pdev->dev,
			"%s: Can't determine number of local logical devices.\n",
			__func__);
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	hpsa_ext_ctrl_present(h, physdev_list);

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);
		bool skip_device = false;

		memset(tmpdevice, 0, sizeof(*tmpdevice));

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* Determine if this is a lun from an external target array */
		tmpdevice->external =
			figure_external_status(h, raid_ctlr_position, i,
						nphysicals, nlocal_logicals);

		/*
		 * Skip over some devices such as a spare.
		 */
		if (!tmpdevice->external && physical_device) {
			skip_device = hpsa_skip_device(h, lunaddrbytes,
					&physdev_list->LUN[phys_dev_index]);
			if (skip_device)
				continue;
		}

		/* Get device type, vendor, model, device id, raid_map */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			h->drv_req_rescan = 1;
			continue;
		}

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;

		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
		if (this_device->physical_device && this_device->expose_device)
			hpsa_get_sas_address(h, lunaddrbytes, this_device);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
		case TYPE_ZBC:
			if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (!this_device->external)
				hpsa_get_enclosure_info(h, lunaddrbytes,
						physdev_list, phys_dev_index,
						this_device);
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}

	if (h->sas_host == NULL) {
		int rc = 0;

		rc = hpsa_add_sas_host(h);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Could not add sas host %d\n", rc);
			goto out;
		}
	}

	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}

static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
				   struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}

/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up each of
	 * the entries in the one list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}

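/*
 * Chaining example for hpsa_scatter_gather() above (illustrative
 * numbers): with h->max_cmd_sg_entries == 32 and use_sg == 40, the
 * first list holds 31 data descriptors, its last slot links to the
 * chain block, and the remaining 9 descriptors land in
 * h->cmd_sg_list[cp->cmdindex]; SGTotal is then 41 (40 data entries
 * plus one chain entry).
 */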
static inline void warn_zero_length_transfer(struct ctlr_info *h,
						u8 *cdb, int cdb_len,
						const char *func)
{
	dev_warn(&h->pdev->dev,
		 "%s: Blocking zero-length request: CDB:%*phN\n",
		 func, cdb_len, cdb);
}

#define IO_ACCEL_INELIGIBLE 1
/* zero-length transfers trigger hardware errors. */
static bool is_zero_length_transfer(u8 *cdb)
{
	u32 block_cnt;

	/* Block zero-length transfer sizes on certain commands. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		block_cnt = get_unaligned_be16(&cdb[7]);
		break;
	case READ_12:
	case WRITE_12:
	case VERIFY_12:		/* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		block_cnt = get_unaligned_be32(&cdb[6]);
		break;
	case READ_16:
	case WRITE_16:
	case VERIFY_16:		/* 0x8F */
		block_cnt = get_unaligned_be32(&cdb[10]);
		break;
	default:
		return false;
	}

	return block_cnt == 0;
}

static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((cdb[1] & 0x1F) << 16) |
				(cdb[2] << 8) |
				cdb[3]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}

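/*
 * fixup_ioaccel_cdb() above rewrites 6- and 12-byte READ/WRITE CDBs as
 * their 10-byte equivalents for the accelerated path.  A transfer count
 * above 0xffff cannot be encoded in READ_10/WRITE_10's 16-bit length
 * field, so such requests are reported IO_ACCEL_INELIGIBLE and take the
 * normal RAID path instead.
 */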
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}

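/*
 * Note on the busaddr adjustment above: ioaccel1 commands live in a
 * separate DMA pool, so c->busaddr is redirected to that pool entry;
 * the BUG_ON checks that the entry is 128-byte aligned (the low seven
 * address bits must be clear).
 */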
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	if (!dev)
		return -1;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}

/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address.
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size) / 512.
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case READ_6:
	case WRITE_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}

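/*
 * Tweak example for set_encrypt_ioaccel2() above (illustrative): on a
 * volume with 4096-byte blocks, an I/O starting at LBA 100 gets a
 * tweak of 100 * 4096 / 512 = 800; on a 512-byte-block volume the
 * tweak is simply the starting LBA.
 */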
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (!cmd->device)
		return -1;

	if (!cmd->device->hostdata)
		return -1;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = IOACCEL2_CHAIN;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		/*
		 * Set the last s/g element bit
		 */
		(curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

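/*
 * ioaccel2 chaining note: when use_sg exceeds h->ioaccel_maxsg, the
 * first in-command sg element above is turned into a chain descriptor
 * (IOACCEL2_CHAIN) pointing at an external list, and sg_count is
 * reported as 1 with sg[0].length carrying the size of the chained
 * list.
 */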
/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	if (!c->scsi_cmd->device)
		return -1;

	if (!c->scsi_cmd->device->hostdata)
		return -1;

	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}

static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0)  {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}

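/*
 * raid_map_helper() above walks *map_index one mirror group at a time
 * (each group is data_disks_per_row entries wide, wrapping back to
 * group 0) until it lands in the group selected by offload_to_mirror.
 * The caller then advances offload_to_mirror so that successive reads
 * round-robin across the mirror groups.
 */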
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	if (!dev)
		return -1;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
		/* fall through */
	case READ_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
		/* fall through */
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
		/* fall through */
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs.
		 * Ensure we have the correct raid_map.
		 */
		if (le16_to_cpu(map->layout_map_count) != 2) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with # of drives divisible by 3.
		 * Ensure we have the correct raid_map.
		 */
		if (le16_to_cpu(map->layout_map_count) != 3) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count - 1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
				r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
				r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];
	if (!c->phys_disk)
		return IO_ACCEL_INELIGIBLE;

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}

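/*
 * Striping example for hpsa_scsi_ioaccel_raid_map() above (illustrative
 * numbers): with strip_size == 128 and data_disks_per_row == 3, a row
 * holds 384 blocks.  A 16-block read at LBA 400 maps to row 1, row
 * offset 16, column 0, so it fits one strip and stays eligible; the
 * same read at LBA 500 would straddle columns 0 and 1 (row offsets
 * 116..131) and is handed back to the normal RAID path.
 */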
/*
 * Submit commands down the "normal" RAID stack path.
 * All callers of hpsa_ciss_submit must check lockup_detected
 * beforehand (and optionally again after calling cmd_alloc).
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */
		break;
	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

static void hpsa_cmd_init(struct ctlr_info *h, int index,
			  struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);

	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}

static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}

static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}

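/*
 * hpsa_cmd_partial_init() above re-primes a command that is being
 * reused on the fast path: only the CDB, the error-info block and the
 * bus address are reset, since everything else was already set up by
 * hpsa_cmd_init() when the pool was preinitialized.
 */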
static int hpsa_ioaccel_submit(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		unsigned char *scsi3addr)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	if (!dev)
		return SCSI_MLQUEUE_HOST_BUSY;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}

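/*
 * hpsa_ioaccel_submit() above prefers the RAID-mapped offload path for
 * logical volumes (offload_enabled) and the direct HBA path for
 * physical disks (hba_ioaccel_enabled); a return of IO_ACCEL_INELIGIBLE
 * tells the caller to fall back to the normal CISS path.
 */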
static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}
	if (c->reset_pending)
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}

5002. /* Running without struct Scsi_Host->host_lock (host_lock-less mode) */
  5003. static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
  5004. {
  5005. struct ctlr_info *h;
  5006. struct hpsa_scsi_dev_t *dev;
  5007. unsigned char scsi3addr[8];
  5008. struct CommandList *c;
  5009. int rc = 0;
  5010. /* Get the ptr to our adapter structure out of cmd->host. */
  5011. h = sdev_to_hba(cmd->device);
  5012. BUG_ON(cmd->request->tag < 0);
  5013. dev = cmd->device->hostdata;
  5014. if (!dev) {
  5015. cmd->result = DID_NO_CONNECT << 16;
  5016. cmd->scsi_done(cmd);
  5017. return 0;
  5018. }
  5019. if (dev->removed) {
  5020. cmd->result = DID_NO_CONNECT << 16;
  5021. cmd->scsi_done(cmd);
  5022. return 0;
  5023. }
  5024. memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
  5025. if (unlikely(lockup_detected(h))) {
  5026. cmd->result = DID_NO_CONNECT << 16;
  5027. cmd->scsi_done(cmd);
  5028. return 0;
  5029. }
  5030. c = cmd_tagged_alloc(h, cmd);
  5031. /*
  5032. * This is necessary because the SML doesn't zero out this field during
  5033. * error recovery.
  5034. */
  5035. cmd->result = 0;
  5036. /*
  5037. * Call alternate submit routine for I/O accelerated commands.
  5038. * Retries always go down the normal I/O path.
  5039. */
  5040. if (likely(cmd->retries == 0 &&
  5041. !blk_rq_is_passthrough(cmd->request) &&
  5042. h->acciopath_status)) {
  5043. rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
  5044. if (rc == 0)
  5045. return 0;
  5046. if (rc == SCSI_MLQUEUE_HOST_BUSY) {
  5047. hpsa_cmd_resolve_and_free(h, c);
  5048. return SCSI_MLQUEUE_HOST_BUSY;
  5049. }
  5050. }
  5051. return hpsa_ciss_submit(h, c, cmd, scsi3addr);
  5052. }
  5053. static void hpsa_scan_complete(struct ctlr_info *h)
  5054. {
  5055. unsigned long flags;
  5056. spin_lock_irqsave(&h->scan_lock, flags);
  5057. h->scan_finished = 1;
  5058. wake_up(&h->scan_wait_queue);
  5059. spin_unlock_irqrestore(&h->scan_lock, flags);
  5060. }
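/*
 * Rescan serialization: only one scan runs at a time (scan_finished is 0
 * while it runs). One additional caller may park on scan_wait_queue with
 * scan_waiting set; any further callers see scan_waiting and return
 * immediately, since a single queued rescan is enough to pick up whatever
 * changed.
 */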
  5061. static void hpsa_scan_start(struct Scsi_Host *sh)
  5062. {
  5063. struct ctlr_info *h = shost_to_hba(sh);
  5064. unsigned long flags;
  5065. /*
  5066. * Don't let rescans be initiated on a controller known to be locked
  5067. * up. If the controller locks up *during* a rescan, that thread is
  5068. * probably hosed, but at least we can prevent new rescan threads from
  5069. * piling up on a locked up controller.
  5070. */
  5071. if (unlikely(lockup_detected(h)))
  5072. return hpsa_scan_complete(h);
  5073. /*
  5074. * If a scan is already waiting to run, no need to add another
  5075. */
  5076. spin_lock_irqsave(&h->scan_lock, flags);
  5077. if (h->scan_waiting) {
  5078. spin_unlock_irqrestore(&h->scan_lock, flags);
  5079. return;
  5080. }
  5081. spin_unlock_irqrestore(&h->scan_lock, flags);
  5082. /* wait until any scan already in progress is finished. */
  5083. while (1) {
  5084. spin_lock_irqsave(&h->scan_lock, flags);
  5085. if (h->scan_finished)
  5086. break;
  5087. h->scan_waiting = 1;
  5088. spin_unlock_irqrestore(&h->scan_lock, flags);
  5089. wait_event(h->scan_wait_queue, h->scan_finished);
  5090. /* Note: We don't need to worry about a race between this
  5091. * thread and driver unload because the midlayer will
  5092. * have incremented the reference count, so unload won't
  5093. * happen if we're in here.
  5094. */
  5095. }
  5096. h->scan_finished = 0; /* mark scan as in progress */
  5097. h->scan_waiting = 0;
  5098. spin_unlock_irqrestore(&h->scan_lock, flags);
  5099. if (unlikely(lockup_detected(h)))
  5100. return hpsa_scan_complete(h);
  5101. /*
  5102. * Do the scan after a reset completion
  5103. */
  5104. spin_lock_irqsave(&h->reset_lock, flags);
  5105. if (h->reset_in_progress) {
  5106. h->drv_req_rescan = 1;
  5107. spin_unlock_irqrestore(&h->reset_lock, flags);
  5108. hpsa_scan_complete(h);
  5109. return;
  5110. }
  5111. spin_unlock_irqrestore(&h->reset_lock, flags);
  5112. hpsa_update_scsi_devices(h);
  5113. hpsa_scan_complete(h);
  5114. }
  5115. static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
  5116. {
  5117. struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
  5118. if (!logical_drive)
  5119. return -ENODEV;
  5120. if (qdepth < 1)
  5121. qdepth = 1;
  5122. else if (qdepth > logical_drive->queue_depth)
  5123. qdepth = logical_drive->queue_depth;
  5124. return scsi_change_queue_depth(sdev, qdepth);
  5125. }
  5126. static int hpsa_scan_finished(struct Scsi_Host *sh,
  5127. unsigned long elapsed_time)
  5128. {
  5129. struct ctlr_info *h = shost_to_hba(sh);
  5130. unsigned long flags;
  5131. int finished;
  5132. spin_lock_irqsave(&h->scan_lock, flags);
  5133. finished = h->scan_finished;
  5134. spin_unlock_irqrestore(&h->scan_lock, flags);
  5135. return finished;
  5136. }
  5137. static int hpsa_scsi_host_alloc(struct ctlr_info *h)
  5138. {
  5139. struct Scsi_Host *sh;
  5140. sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
  5141. if (sh == NULL) {
  5142. dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
  5143. return -ENOMEM;
  5144. }
  5145. sh->io_port = 0;
  5146. sh->n_io_port = 0;
  5147. sh->this_id = -1;
  5148. sh->max_channel = 3;
  5149. sh->max_cmd_len = MAX_COMMAND_SIZE;
  5150. sh->max_lun = HPSA_MAX_LUN;
  5151. sh->max_id = HPSA_MAX_LUN;
  5152. sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
  5153. sh->cmd_per_lun = sh->can_queue;
  5154. sh->sg_tablesize = h->maxsgentries;
  5155. sh->transportt = hpsa_sas_transport_template;
  5156. sh->hostdata[0] = (unsigned long) h;
  5157. sh->irq = pci_irq_vector(h->pdev, 0);
  5158. sh->unique_id = sh->irq;
  5159. h->scsi_host = sh;
  5160. return 0;
  5161. }
  5162. static int hpsa_scsi_add_host(struct ctlr_info *h)
  5163. {
  5164. int rv;
  5165. rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
  5166. if (rv) {
  5167. dev_err(&h->pdev->dev, "scsi_add_host failed\n");
  5168. return rv;
  5169. }
  5170. scsi_scan_host(h->scsi_host);
  5171. return 0;
  5172. }
  5173. /*
  5174. * The block layer has already gone to the trouble of picking out a unique,
  5175. * small-integer tag for this request. We use an offset from that value as
  5176. * an index to select our command block. (The offset allows us to reserve the
  5177. * low-numbered entries for our own uses.)
  5178. */
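/*
 * For example, with HPSA_NRESERVED_CMDS slots reserved, block tag 0 maps to
 * pool index HPSA_NRESERVED_CMDS, tag 1 to HPSA_NRESERVED_CMDS + 1, and so
 * on; indexes 0..HPSA_NRESERVED_CMDS-1 remain available to cmd_alloc().
 */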
  5179. static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
  5180. {
  5181. int idx = scmd->request->tag;
  5182. if (idx < 0)
  5183. return idx;
  5184. /* Offset to leave space for internal cmds. */
5185. return idx + HPSA_NRESERVED_CMDS;
  5186. }
  5187. /*
  5188. * Send a TEST_UNIT_READY command to the specified LUN using the specified
  5189. * reply queue; returns zero if the unit is ready, and non-zero otherwise.
  5190. */
  5191. static int hpsa_send_test_unit_ready(struct ctlr_info *h,
  5192. struct CommandList *c, unsigned char lunaddr[],
  5193. int reply_queue)
  5194. {
  5195. int rc;
  5196. /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
  5197. (void) fill_cmd(c, TEST_UNIT_READY, h,
  5198. NULL, 0, 0, lunaddr, TYPE_CMD);
  5199. rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
  5200. if (rc)
  5201. return rc;
  5202. /* no unmap needed here because no data xfer. */
  5203. /* Check if the unit is already ready. */
  5204. if (c->err_info->CommandStatus == CMD_SUCCESS)
  5205. return 0;
  5206. /*
  5207. * The first command sent after reset will receive "unit attention" to
  5208. * indicate that the LUN has been reset...this is actually what we're
  5209. * looking for (but, success is good too).
  5210. */
  5211. if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
  5212. c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
  5213. (c->err_info->SenseInfo[2] == NO_SENSE ||
  5214. c->err_info->SenseInfo[2] == UNIT_ATTENTION))
  5215. return 0;
  5216. return 1;
  5217. }
  5218. /*
  5219. * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
  5220. * returns zero when the unit is ready, and non-zero when giving up.
  5221. */
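/*
 * The sleep comes before each attempt so that a just-issued reset doesn't
 * immediately abort the TUR; the interval doubles each try (1s, 2s, 4s, ...)
 * up to HPSA_MAX_WAIT_INTERVAL_SECS, for at most HPSA_TUR_RETRY_LIMIT tries.
 */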
  5222. static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
  5223. struct CommandList *c,
  5224. unsigned char lunaddr[], int reply_queue)
  5225. {
  5226. int rc;
  5227. int count = 0;
  5228. int waittime = 1; /* seconds */
  5229. /* Send test unit ready until device ready, or give up. */
  5230. for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
  5231. /*
  5232. * Wait for a bit. do this first, because if we send
  5233. * the TUR right away, the reset will just abort it.
  5234. */
  5235. msleep(1000 * waittime);
  5236. rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
  5237. if (!rc)
  5238. break;
  5239. /* Increase wait time with each try, up to a point. */
  5240. if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
  5241. waittime *= 2;
  5242. dev_warn(&h->pdev->dev,
  5243. "waiting %d secs for device to become ready.\n",
  5244. waittime);
  5245. }
  5246. return rc;
  5247. }
  5248. static int wait_for_device_to_become_ready(struct ctlr_info *h,
  5249. unsigned char lunaddr[],
  5250. int reply_queue)
  5251. {
  5252. int first_queue;
  5253. int last_queue;
  5254. int rq;
  5255. int rc = 0;
  5256. struct CommandList *c;
  5257. c = cmd_alloc(h);
  5258. /*
  5259. * If no specific reply queue was requested, then send the TUR
  5260. * repeatedly, requesting a reply on each reply queue; otherwise execute
  5261. * the loop exactly once using only the specified queue.
  5262. */
  5263. if (reply_queue == DEFAULT_REPLY_QUEUE) {
  5264. first_queue = 0;
  5265. last_queue = h->nreply_queues - 1;
  5266. } else {
  5267. first_queue = reply_queue;
  5268. last_queue = reply_queue;
  5269. }
  5270. for (rq = first_queue; rq <= last_queue; rq++) {
  5271. rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
  5272. if (rc)
  5273. break;
  5274. }
  5275. if (rc)
  5276. dev_warn(&h->pdev->dev, "giving up on device.\n");
  5277. else
  5278. dev_warn(&h->pdev->dev, "device is ready.\n");
  5279. cmd_free(h, c);
  5280. return rc;
  5281. }
  5282. /* Need at least one of these error handlers to keep ../scsi/hosts.c from
  5283. * complaining. Doing a host- or bus-reset can't do anything good here.
  5284. */
  5285. static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
  5286. {
  5287. int rc = SUCCESS;
  5288. struct ctlr_info *h;
  5289. struct hpsa_scsi_dev_t *dev;
  5290. u8 reset_type;
  5291. char msg[48];
  5292. unsigned long flags;
5293. /* find the controller to which the command was sent */
  5294. h = sdev_to_hba(scsicmd->device);
  5295. if (h == NULL) /* paranoia */
  5296. return FAILED;
  5297. spin_lock_irqsave(&h->reset_lock, flags);
  5298. h->reset_in_progress = 1;
  5299. spin_unlock_irqrestore(&h->reset_lock, flags);
  5300. if (lockup_detected(h)) {
  5301. rc = FAILED;
  5302. goto return_reset_status;
  5303. }
  5304. dev = scsicmd->device->hostdata;
  5305. if (!dev) {
  5306. dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
  5307. rc = FAILED;
  5308. goto return_reset_status;
  5309. }
  5310. if (dev->devtype == TYPE_ENCLOSURE) {
  5311. rc = SUCCESS;
  5312. goto return_reset_status;
  5313. }
  5314. /* if controller locked up, we can guarantee command won't complete */
  5315. if (lockup_detected(h)) {
  5316. snprintf(msg, sizeof(msg),
  5317. "cmd %d RESET FAILED, lockup detected",
  5318. hpsa_get_cmd_index(scsicmd));
  5319. hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
  5320. rc = FAILED;
  5321. goto return_reset_status;
  5322. }
  5323. /* this reset request might be the result of a lockup; check */
  5324. if (detect_controller_lockup(h)) {
  5325. snprintf(msg, sizeof(msg),
  5326. "cmd %d RESET FAILED, new lockup detected",
  5327. hpsa_get_cmd_index(scsicmd));
  5328. hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
  5329. rc = FAILED;
  5330. goto return_reset_status;
  5331. }
  5332. /* Do not attempt on controller */
  5333. if (is_hba_lunid(dev->scsi3addr)) {
  5334. rc = SUCCESS;
  5335. goto return_reset_status;
  5336. }
  5337. if (is_logical_dev_addr_mode(dev->scsi3addr))
  5338. reset_type = HPSA_DEVICE_RESET_MSG;
  5339. else
  5340. reset_type = HPSA_PHYS_TARGET_RESET;
  5341. sprintf(msg, "resetting %s",
  5342. reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
  5343. hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
  5344. /* send a reset to the SCSI LUN which the command was sent to */
  5345. rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
  5346. DEFAULT_REPLY_QUEUE);
  5347. if (rc == 0)
  5348. rc = SUCCESS;
  5349. else
  5350. rc = FAILED;
  5351. sprintf(msg, "reset %s %s",
  5352. reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
  5353. rc == SUCCESS ? "completed successfully" : "failed");
  5354. hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
  5355. return_reset_status:
  5356. spin_lock_irqsave(&h->reset_lock, flags);
  5357. h->reset_in_progress = 0;
  5358. spin_unlock_irqrestore(&h->reset_lock, flags);
  5359. return rc;
  5360. }
  5361. /*
5362. * For operations with an associated SCSI command, a command block is allocated
5363. * at init and selected by cmd_tagged_alloc() using the block request tag as an
5364. * index into a table of entries. cmd_tagged_free() is the complement, although
5365. * cmd_free() may be called instead.
  5366. */
  5367. static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
  5368. struct scsi_cmnd *scmd)
  5369. {
  5370. int idx = hpsa_get_cmd_index(scmd);
  5371. struct CommandList *c = h->cmd_pool + idx;
  5372. if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
  5373. dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
  5374. idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
  5375. /* The index value comes from the block layer, so if it's out of
  5376. * bounds, it's probably not our bug.
  5377. */
  5378. BUG();
  5379. }
  5380. atomic_inc(&c->refcount);
  5381. if (unlikely(!hpsa_is_cmd_idle(c))) {
  5382. /*
  5383. * We expect that the SCSI layer will hand us a unique tag
  5384. * value. Thus, there should never be a collision here between
  5385. * two requests...because if the selected command isn't idle
  5386. * then someone is going to be very disappointed.
  5387. */
  5388. dev_err(&h->pdev->dev,
  5389. "tag collision (tag=%d) in cmd_tagged_alloc().\n",
  5390. idx);
  5391. if (c->scsi_cmd != NULL)
  5392. scsi_print_command(c->scsi_cmd);
  5393. scsi_print_command(scmd);
  5394. }
  5395. hpsa_cmd_partial_init(h, idx, c);
  5396. return c;
  5397. }
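/*
 * Drop the reference taken by cmd_tagged_alloc(). Nothing else needs to be
 * released: the block is never put on a free list, it is simply re-selected
 * by index the next time the block layer hands out the same tag.
 */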
  5398. static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
  5399. {
  5400. /*
  5401. * Release our reference to the block. We don't need to do anything
  5402. * else to free it, because it is accessed by index.
  5403. */
  5404. (void)atomic_dec(&c->refcount);
  5405. }
  5406. /*
5407. * For operations that cannot sleep, a command block is allocated at init,
5408. * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5409. * which ones are free or in use; the bitmap and the per-command refcount
5410. * are manipulated atomically, so no lock needs to be held when calling this.
5411. * cmd_free() is the complement. This function never gives up (it never
5412. * returns NULL); if it hangs, another thread must call cmd_free() to free some tags.
  5413. */
  5414. static struct CommandList *cmd_alloc(struct ctlr_info *h)
  5415. {
  5416. struct CommandList *c;
  5417. int refcount, i;
  5418. int offset = 0;
  5419. /*
5420. * There is some *extremely* small but non-zero chance that
  5421. * multiple threads could get in here, and one thread could
  5422. * be scanning through the list of bits looking for a free
  5423. * one, but the free ones are always behind him, and other
  5424. * threads sneak in behind him and eat them before he can
  5425. * get to them, so that while there is always a free one, a
  5426. * very unlucky thread might be starved anyway, never able to
  5427. * beat the other threads. In reality, this happens so
  5428. * infrequently as to be indistinguishable from never.
  5429. *
  5430. * Note that we start allocating commands before the SCSI host structure
  5431. * is initialized. Since the search starts at bit zero, this
  5432. * all works, since we have at least one command structure available;
  5433. * however, it means that the structures with the low indexes have to be
  5434. * reserved for driver-initiated requests, while requests from the block
  5435. * layer will use the higher indexes.
  5436. */
  5437. for (;;) {
  5438. i = find_next_zero_bit(h->cmd_pool_bits,
  5439. HPSA_NRESERVED_CMDS,
  5440. offset);
  5441. if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
  5442. offset = 0;
  5443. continue;
  5444. }
  5445. c = h->cmd_pool + i;
  5446. refcount = atomic_inc_return(&c->refcount);
  5447. if (unlikely(refcount > 1)) {
  5448. cmd_free(h, c); /* already in use */
  5449. offset = (i + 1) % HPSA_NRESERVED_CMDS;
  5450. continue;
  5451. }
  5452. set_bit(i & (BITS_PER_LONG - 1),
  5453. h->cmd_pool_bits + (i / BITS_PER_LONG));
  5454. break; /* it's ours now. */
  5455. }
  5456. hpsa_cmd_partial_init(h, i, c);
  5457. return c;
  5458. }
  5459. /*
  5460. * This is the complementary operation to cmd_alloc(). Note, however, in some
  5461. * corner cases it may also be used to free blocks allocated by
  5462. * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
  5463. * the clear-bit is harmless.
  5464. */
  5465. static void cmd_free(struct ctlr_info *h, struct CommandList *c)
  5466. {
  5467. if (atomic_dec_and_test(&c->refcount)) {
  5468. int i;
  5469. i = c - h->cmd_pool;
  5470. clear_bit(i & (BITS_PER_LONG - 1),
  5471. h->cmd_pool_bits + (i / BITS_PER_LONG));
  5472. }
  5473. }
  5474. #ifdef CONFIG_COMPAT
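/*
 * 32-bit compat ioctls: the 32-bit user structures are repacked into their
 * native equivalents in a user-accessible scratch area obtained from
 * compat_alloc_user_space(), and the regular hpsa_ioctl() path is then run
 * on the repacked copy.
 */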
  5475. static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
  5476. void __user *arg)
  5477. {
  5478. IOCTL32_Command_struct __user *arg32 =
  5479. (IOCTL32_Command_struct __user *) arg;
  5480. IOCTL_Command_struct arg64;
  5481. IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
  5482. int err;
  5483. u32 cp;
  5484. memset(&arg64, 0, sizeof(arg64));
  5485. err = 0;
  5486. err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
  5487. sizeof(arg64.LUN_info));
  5488. err |= copy_from_user(&arg64.Request, &arg32->Request,
  5489. sizeof(arg64.Request));
  5490. err |= copy_from_user(&arg64.error_info, &arg32->error_info,
  5491. sizeof(arg64.error_info));
  5492. err |= get_user(arg64.buf_size, &arg32->buf_size);
  5493. err |= get_user(cp, &arg32->buf);
  5494. arg64.buf = compat_ptr(cp);
  5495. err |= copy_to_user(p, &arg64, sizeof(arg64));
  5496. if (err)
  5497. return -EFAULT;
  5498. err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
  5499. if (err)
  5500. return err;
  5501. err |= copy_in_user(&arg32->error_info, &p->error_info,
  5502. sizeof(arg32->error_info));
  5503. if (err)
  5504. return -EFAULT;
  5505. return err;
  5506. }
  5507. static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
  5508. int cmd, void __user *arg)
  5509. {
  5510. BIG_IOCTL32_Command_struct __user *arg32 =
  5511. (BIG_IOCTL32_Command_struct __user *) arg;
  5512. BIG_IOCTL_Command_struct arg64;
  5513. BIG_IOCTL_Command_struct __user *p =
  5514. compat_alloc_user_space(sizeof(arg64));
  5515. int err;
  5516. u32 cp;
  5517. memset(&arg64, 0, sizeof(arg64));
  5518. err = 0;
  5519. err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
  5520. sizeof(arg64.LUN_info));
  5521. err |= copy_from_user(&arg64.Request, &arg32->Request,
  5522. sizeof(arg64.Request));
  5523. err |= copy_from_user(&arg64.error_info, &arg32->error_info,
  5524. sizeof(arg64.error_info));
  5525. err |= get_user(arg64.buf_size, &arg32->buf_size);
  5526. err |= get_user(arg64.malloc_size, &arg32->malloc_size);
  5527. err |= get_user(cp, &arg32->buf);
  5528. arg64.buf = compat_ptr(cp);
  5529. err |= copy_to_user(p, &arg64, sizeof(arg64));
  5530. if (err)
  5531. return -EFAULT;
  5532. err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
  5533. if (err)
  5534. return err;
  5535. err |= copy_in_user(&arg32->error_info, &p->error_info,
  5536. sizeof(arg32->error_info));
  5537. if (err)
  5538. return -EFAULT;
  5539. return err;
  5540. }
  5541. static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
  5542. {
  5543. switch (cmd) {
  5544. case CCISS_GETPCIINFO:
  5545. case CCISS_GETINTINFO:
  5546. case CCISS_SETINTINFO:
  5547. case CCISS_GETNODENAME:
  5548. case CCISS_SETNODENAME:
  5549. case CCISS_GETHEARTBEAT:
  5550. case CCISS_GETBUSTYPES:
  5551. case CCISS_GETFIRMVER:
  5552. case CCISS_GETDRIVVER:
  5553. case CCISS_REVALIDVOLS:
  5554. case CCISS_DEREGDISK:
  5555. case CCISS_REGNEWDISK:
  5556. case CCISS_REGNEWD:
  5557. case CCISS_RESCANDISK:
  5558. case CCISS_GETLUNINFO:
  5559. return hpsa_ioctl(dev, cmd, arg);
  5560. case CCISS_PASSTHRU32:
  5561. return hpsa_ioctl32_passthru(dev, cmd, arg);
  5562. case CCISS_BIG_PASSTHRU32:
  5563. return hpsa_ioctl32_big_passthru(dev, cmd, arg);
  5564. default:
  5565. return -ENOIOCTLCMD;
  5566. }
  5567. }
  5568. #endif
  5569. static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
  5570. {
  5571. struct hpsa_pci_info pciinfo;
  5572. if (!argp)
  5573. return -EINVAL;
  5574. pciinfo.domain = pci_domain_nr(h->pdev->bus);
  5575. pciinfo.bus = h->pdev->bus->number;
  5576. pciinfo.dev_fn = h->pdev->devfn;
  5577. pciinfo.board_id = h->board_id;
  5578. if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
  5579. return -EFAULT;
  5580. return 0;
  5581. }
  5582. static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
  5583. {
  5584. DriverVer_type DriverVer;
  5585. unsigned char vmaj, vmin, vsubmin;
  5586. int rc;
  5587. rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
  5588. &vmaj, &vmin, &vsubmin);
  5589. if (rc != 3) {
5590. dev_info(&h->pdev->dev, "driver version string '%s' "
5591. "unrecognized.\n", HPSA_DRIVER_VERSION);
  5592. vmaj = 0;
  5593. vmin = 0;
  5594. vsubmin = 0;
  5595. }
  5596. DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
  5597. if (!argp)
  5598. return -EINVAL;
  5599. if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
  5600. return -EFAULT;
  5601. return 0;
  5602. }
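/*
 * CCISS_PASSTHRU: sends one caller-supplied request with at most a single
 * kmalloc'ed bounce buffer, mapped as one SG element; larger transfers go
 * through the BIG_PASSTHRU variant further down, which splits the buffer
 * across multiple SG entries.
 */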
  5603. static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
  5604. {
  5605. IOCTL_Command_struct iocommand;
  5606. struct CommandList *c;
  5607. char *buff = NULL;
  5608. u64 temp64;
  5609. int rc = 0;
  5610. if (!argp)
  5611. return -EINVAL;
  5612. if (!capable(CAP_SYS_RAWIO))
  5613. return -EPERM;
  5614. if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
  5615. return -EFAULT;
  5616. if ((iocommand.buf_size < 1) &&
  5617. (iocommand.Request.Type.Direction != XFER_NONE)) {
  5618. return -EINVAL;
  5619. }
  5620. if (iocommand.buf_size > 0) {
  5621. buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
  5622. if (buff == NULL)
  5623. return -ENOMEM;
  5624. if (iocommand.Request.Type.Direction & XFER_WRITE) {
  5625. /* Copy the data into the buffer we created */
  5626. if (copy_from_user(buff, iocommand.buf,
  5627. iocommand.buf_size)) {
  5628. rc = -EFAULT;
  5629. goto out_kfree;
  5630. }
  5631. } else {
  5632. memset(buff, 0, iocommand.buf_size);
  5633. }
  5634. }
  5635. c = cmd_alloc(h);
  5636. /* Fill in the command type */
  5637. c->cmd_type = CMD_IOCTL_PEND;
  5638. c->scsi_cmd = SCSI_CMD_BUSY;
  5639. /* Fill in Command Header */
  5640. c->Header.ReplyQueue = 0; /* unused in simple mode */
  5641. if (iocommand.buf_size > 0) { /* buffer to fill */
  5642. c->Header.SGList = 1;
  5643. c->Header.SGTotal = cpu_to_le16(1);
  5644. } else { /* no buffers to fill */
  5645. c->Header.SGList = 0;
  5646. c->Header.SGTotal = cpu_to_le16(0);
  5647. }
  5648. memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
  5649. /* Fill in Request block */
  5650. memcpy(&c->Request, &iocommand.Request,
  5651. sizeof(c->Request));
  5652. /* Fill in the scatter gather information */
  5653. if (iocommand.buf_size > 0) {
  5654. temp64 = pci_map_single(h->pdev, buff,
  5655. iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
  5656. if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
  5657. c->SG[0].Addr = cpu_to_le64(0);
  5658. c->SG[0].Len = cpu_to_le32(0);
  5659. rc = -ENOMEM;
  5660. goto out;
  5661. }
  5662. c->SG[0].Addr = cpu_to_le64(temp64);
  5663. c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
  5664. c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
  5665. }
  5666. rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
  5667. NO_TIMEOUT);
  5668. if (iocommand.buf_size > 0)
  5669. hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
  5670. check_ioctl_unit_attention(h, c);
  5671. if (rc) {
  5672. rc = -EIO;
  5673. goto out;
  5674. }
  5675. /* Copy the error information out */
  5676. memcpy(&iocommand.error_info, c->err_info,
  5677. sizeof(iocommand.error_info));
  5678. if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
  5679. rc = -EFAULT;
  5680. goto out;
  5681. }
  5682. if ((iocommand.Request.Type.Direction & XFER_READ) &&
  5683. iocommand.buf_size > 0) {
  5684. /* Copy the data out of the buffer we created */
  5685. if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
  5686. rc = -EFAULT;
  5687. goto out;
  5688. }
  5689. }
  5690. out:
  5691. cmd_free(h, c);
  5692. out_kfree:
  5693. kfree(buff);
  5694. return rc;
  5695. }
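/*
 * CCISS_BIG_PASSTHRU: the user buffer (bounded by malloc_size *
 * SG_ENTRIES_IN_CMD bytes) is copied through kmalloc'ed chunks of at most
 * ioc->malloc_size bytes each, one chunk per SG entry of the command.
 */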
  5696. static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
  5697. {
  5698. BIG_IOCTL_Command_struct *ioc;
  5699. struct CommandList *c;
  5700. unsigned char **buff = NULL;
  5701. int *buff_size = NULL;
  5702. u64 temp64;
  5703. BYTE sg_used = 0;
  5704. int status = 0;
  5705. u32 left;
  5706. u32 sz;
  5707. BYTE __user *data_ptr;
  5708. if (!argp)
  5709. return -EINVAL;
  5710. if (!capable(CAP_SYS_RAWIO))
  5711. return -EPERM;
  5712. ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
  5713. if (!ioc) {
  5714. status = -ENOMEM;
  5715. goto cleanup1;
  5716. }
  5717. if (copy_from_user(ioc, argp, sizeof(*ioc))) {
  5718. status = -EFAULT;
  5719. goto cleanup1;
  5720. }
  5721. if ((ioc->buf_size < 1) &&
  5722. (ioc->Request.Type.Direction != XFER_NONE)) {
  5723. status = -EINVAL;
  5724. goto cleanup1;
  5725. }
  5726. /* Check kmalloc limits using all SGs */
  5727. if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
  5728. status = -EINVAL;
  5729. goto cleanup1;
  5730. }
  5731. if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
  5732. status = -EINVAL;
  5733. goto cleanup1;
  5734. }
  5735. buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
  5736. if (!buff) {
  5737. status = -ENOMEM;
  5738. goto cleanup1;
  5739. }
  5740. buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
  5741. if (!buff_size) {
  5742. status = -ENOMEM;
  5743. goto cleanup1;
  5744. }
  5745. left = ioc->buf_size;
  5746. data_ptr = ioc->buf;
  5747. while (left) {
  5748. sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
  5749. buff_size[sg_used] = sz;
  5750. buff[sg_used] = kmalloc(sz, GFP_KERNEL);
  5751. if (buff[sg_used] == NULL) {
  5752. status = -ENOMEM;
  5753. goto cleanup1;
  5754. }
  5755. if (ioc->Request.Type.Direction & XFER_WRITE) {
  5756. if (copy_from_user(buff[sg_used], data_ptr, sz)) {
  5757. status = -EFAULT;
  5758. goto cleanup1;
  5759. }
  5760. } else
  5761. memset(buff[sg_used], 0, sz);
  5762. left -= sz;
  5763. data_ptr += sz;
  5764. sg_used++;
  5765. }
  5766. c = cmd_alloc(h);
  5767. c->cmd_type = CMD_IOCTL_PEND;
  5768. c->scsi_cmd = SCSI_CMD_BUSY;
  5769. c->Header.ReplyQueue = 0;
  5770. c->Header.SGList = (u8) sg_used;
  5771. c->Header.SGTotal = cpu_to_le16(sg_used);
  5772. memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
  5773. memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
  5774. if (ioc->buf_size > 0) {
  5775. int i;
  5776. for (i = 0; i < sg_used; i++) {
  5777. temp64 = pci_map_single(h->pdev, buff[i],
  5778. buff_size[i], PCI_DMA_BIDIRECTIONAL);
  5779. if (dma_mapping_error(&h->pdev->dev,
  5780. (dma_addr_t) temp64)) {
  5781. c->SG[i].Addr = cpu_to_le64(0);
  5782. c->SG[i].Len = cpu_to_le32(0);
  5783. hpsa_pci_unmap(h->pdev, c, i,
  5784. PCI_DMA_BIDIRECTIONAL);
  5785. status = -ENOMEM;
  5786. goto cleanup0;
  5787. }
  5788. c->SG[i].Addr = cpu_to_le64(temp64);
  5789. c->SG[i].Len = cpu_to_le32(buff_size[i]);
  5790. c->SG[i].Ext = cpu_to_le32(0);
  5791. }
  5792. c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
  5793. }
  5794. status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
  5795. NO_TIMEOUT);
  5796. if (sg_used)
  5797. hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
  5798. check_ioctl_unit_attention(h, c);
  5799. if (status) {
  5800. status = -EIO;
  5801. goto cleanup0;
  5802. }
  5803. /* Copy the error information out */
  5804. memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
  5805. if (copy_to_user(argp, ioc, sizeof(*ioc))) {
  5806. status = -EFAULT;
  5807. goto cleanup0;
  5808. }
  5809. if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
  5810. int i;
  5811. /* Copy the data out of the buffer we created */
  5812. BYTE __user *ptr = ioc->buf;
  5813. for (i = 0; i < sg_used; i++) {
  5814. if (copy_to_user(ptr, buff[i], buff_size[i])) {
  5815. status = -EFAULT;
  5816. goto cleanup0;
  5817. }
  5818. ptr += buff_size[i];
  5819. }
  5820. }
  5821. status = 0;
  5822. cleanup0:
  5823. cmd_free(h, c);
  5824. cleanup1:
  5825. if (buff) {
  5826. int i;
  5827. for (i = 0; i < sg_used; i++)
  5828. kfree(buff[i]);
  5829. kfree(buff);
  5830. }
  5831. kfree(buff_size);
  5832. kfree(ioc);
  5833. return status;
  5834. }
  5835. static void check_ioctl_unit_attention(struct ctlr_info *h,
  5836. struct CommandList *c)
  5837. {
  5838. if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
  5839. c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
  5840. (void) check_for_unit_attention(h, c);
  5841. }
  5842. /*
5843. * ioctl dispatcher for the CCISS-compatible ioctls
  5844. */
  5845. static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
  5846. {
  5847. struct ctlr_info *h;
  5848. void __user *argp = (void __user *)arg;
  5849. int rc;
  5850. h = sdev_to_hba(dev);
  5851. switch (cmd) {
  5852. case CCISS_DEREGDISK:
  5853. case CCISS_REGNEWDISK:
  5854. case CCISS_REGNEWD:
  5855. hpsa_scan_start(h->scsi_host);
  5856. return 0;
  5857. case CCISS_GETPCIINFO:
  5858. return hpsa_getpciinfo_ioctl(h, argp);
  5859. case CCISS_GETDRIVVER:
  5860. return hpsa_getdrivver_ioctl(h, argp);
  5861. case CCISS_PASSTHRU:
  5862. if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
  5863. return -EAGAIN;
  5864. rc = hpsa_passthru_ioctl(h, argp);
  5865. atomic_inc(&h->passthru_cmds_avail);
  5866. return rc;
  5867. case CCISS_BIG_PASSTHRU:
  5868. if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
  5869. return -EAGAIN;
  5870. rc = hpsa_big_passthru_ioctl(h, argp);
  5871. atomic_inc(&h->passthru_cmds_avail);
  5872. return rc;
  5873. default:
  5874. return -ENOTTY;
  5875. }
  5876. }
  5877. static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
  5878. u8 reset_type)
  5879. {
  5880. struct CommandList *c;
  5881. c = cmd_alloc(h);
  5882. /* fill_cmd can't fail here, no data buffer to map */
  5883. (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
  5884. RAID_CTLR_LUNID, TYPE_MSG);
  5885. c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
  5886. c->waiting = NULL;
  5887. enqueue_cmd_and_start_io(h, c);
  5888. /* Don't wait for completion, the reset won't complete. Don't free
  5889. * the command either. This is the last command we will send before
  5890. * re-initializing everything, so it doesn't matter and won't leak.
  5891. */
  5892. return;
  5893. }
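/*
 * Build a controller command in c. cmd_type selects between ordinary
 * commands (TYPE_CMD) and controller messages (TYPE_MSG); buff/size, when
 * non-NULL, describe a single data buffer that is DMA-mapped at the end via
 * hpsa_map_one(). Returns 0 on success, -1 if that mapping fails.
 */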
  5894. static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
  5895. void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
  5896. int cmd_type)
  5897. {
  5898. int pci_dir = XFER_NONE;
  5899. c->cmd_type = CMD_IOCTL_PEND;
  5900. c->scsi_cmd = SCSI_CMD_BUSY;
  5901. c->Header.ReplyQueue = 0;
  5902. if (buff != NULL && size > 0) {
  5903. c->Header.SGList = 1;
  5904. c->Header.SGTotal = cpu_to_le16(1);
  5905. } else {
  5906. c->Header.SGList = 0;
  5907. c->Header.SGTotal = cpu_to_le16(0);
  5908. }
  5909. memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
  5910. if (cmd_type == TYPE_CMD) {
  5911. switch (cmd) {
  5912. case HPSA_INQUIRY:
  5913. /* are we trying to read a vital product page */
  5914. if (page_code & VPD_PAGE) {
  5915. c->Request.CDB[1] = 0x01;
  5916. c->Request.CDB[2] = (page_code & 0xff);
  5917. }
  5918. c->Request.CDBLen = 6;
  5919. c->Request.type_attr_dir =
  5920. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  5921. c->Request.Timeout = 0;
  5922. c->Request.CDB[0] = HPSA_INQUIRY;
  5923. c->Request.CDB[4] = size & 0xFF;
  5924. break;
  5925. case RECEIVE_DIAGNOSTIC:
  5926. c->Request.CDBLen = 6;
  5927. c->Request.type_attr_dir =
  5928. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  5929. c->Request.Timeout = 0;
  5930. c->Request.CDB[0] = cmd;
  5931. c->Request.CDB[1] = 1;
  5932. c->Request.CDB[2] = 1;
  5933. c->Request.CDB[3] = (size >> 8) & 0xFF;
  5934. c->Request.CDB[4] = size & 0xFF;
  5935. break;
  5936. case HPSA_REPORT_LOG:
  5937. case HPSA_REPORT_PHYS:
5938. /* Talking to the controller, so it's a physical command:
5939. mode = 00, target = 0. Nothing to write.
  5940. */
  5941. c->Request.CDBLen = 12;
  5942. c->Request.type_attr_dir =
  5943. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  5944. c->Request.Timeout = 0;
  5945. c->Request.CDB[0] = cmd;
  5946. c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
  5947. c->Request.CDB[7] = (size >> 16) & 0xFF;
  5948. c->Request.CDB[8] = (size >> 8) & 0xFF;
  5949. c->Request.CDB[9] = size & 0xFF;
  5950. break;
  5951. case BMIC_SENSE_DIAG_OPTIONS:
  5952. c->Request.CDBLen = 16;
  5953. c->Request.type_attr_dir =
  5954. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  5955. c->Request.Timeout = 0;
  5956. /* Spec says this should be BMIC_WRITE */
  5957. c->Request.CDB[0] = BMIC_READ;
  5958. c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
  5959. break;
  5960. case BMIC_SET_DIAG_OPTIONS:
  5961. c->Request.CDBLen = 16;
  5962. c->Request.type_attr_dir =
  5963. TYPE_ATTR_DIR(cmd_type,
  5964. ATTR_SIMPLE, XFER_WRITE);
  5965. c->Request.Timeout = 0;
  5966. c->Request.CDB[0] = BMIC_WRITE;
  5967. c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
  5968. break;
  5969. case HPSA_CACHE_FLUSH:
  5970. c->Request.CDBLen = 12;
  5971. c->Request.type_attr_dir =
  5972. TYPE_ATTR_DIR(cmd_type,
  5973. ATTR_SIMPLE, XFER_WRITE);
  5974. c->Request.Timeout = 0;
  5975. c->Request.CDB[0] = BMIC_WRITE;
  5976. c->Request.CDB[6] = BMIC_CACHE_FLUSH;
  5977. c->Request.CDB[7] = (size >> 8) & 0xFF;
  5978. c->Request.CDB[8] = size & 0xFF;
  5979. break;
  5980. case TEST_UNIT_READY:
  5981. c->Request.CDBLen = 6;
  5982. c->Request.type_attr_dir =
  5983. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
  5984. c->Request.Timeout = 0;
  5985. break;
  5986. case HPSA_GET_RAID_MAP:
  5987. c->Request.CDBLen = 12;
  5988. c->Request.type_attr_dir =
  5989. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  5990. c->Request.Timeout = 0;
  5991. c->Request.CDB[0] = HPSA_CISS_READ;
  5992. c->Request.CDB[1] = cmd;
  5993. c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
  5994. c->Request.CDB[7] = (size >> 16) & 0xFF;
  5995. c->Request.CDB[8] = (size >> 8) & 0xFF;
  5996. c->Request.CDB[9] = size & 0xFF;
  5997. break;
  5998. case BMIC_SENSE_CONTROLLER_PARAMETERS:
  5999. c->Request.CDBLen = 10;
  6000. c->Request.type_attr_dir =
  6001. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  6002. c->Request.Timeout = 0;
  6003. c->Request.CDB[0] = BMIC_READ;
  6004. c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
  6005. c->Request.CDB[7] = (size >> 16) & 0xFF;
  6006. c->Request.CDB[8] = (size >> 8) & 0xFF;
  6007. break;
  6008. case BMIC_IDENTIFY_PHYSICAL_DEVICE:
  6009. c->Request.CDBLen = 10;
  6010. c->Request.type_attr_dir =
  6011. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  6012. c->Request.Timeout = 0;
  6013. c->Request.CDB[0] = BMIC_READ;
  6014. c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
  6015. c->Request.CDB[7] = (size >> 16) & 0xFF;
6016. c->Request.CDB[8] = (size >> 8) & 0xFF;
  6017. break;
  6018. case BMIC_SENSE_SUBSYSTEM_INFORMATION:
  6019. c->Request.CDBLen = 10;
  6020. c->Request.type_attr_dir =
  6021. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  6022. c->Request.Timeout = 0;
  6023. c->Request.CDB[0] = BMIC_READ;
  6024. c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
  6025. c->Request.CDB[7] = (size >> 16) & 0xFF;
6026. c->Request.CDB[8] = (size >> 8) & 0xFF;
  6027. break;
  6028. case BMIC_SENSE_STORAGE_BOX_PARAMS:
  6029. c->Request.CDBLen = 10;
  6030. c->Request.type_attr_dir =
  6031. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  6032. c->Request.Timeout = 0;
  6033. c->Request.CDB[0] = BMIC_READ;
  6034. c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
  6035. c->Request.CDB[7] = (size >> 16) & 0xFF;
6036. c->Request.CDB[8] = (size >> 8) & 0xFF;
  6037. break;
  6038. case BMIC_IDENTIFY_CONTROLLER:
  6039. c->Request.CDBLen = 10;
  6040. c->Request.type_attr_dir =
  6041. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
  6042. c->Request.Timeout = 0;
  6043. c->Request.CDB[0] = BMIC_READ;
  6044. c->Request.CDB[1] = 0;
  6045. c->Request.CDB[2] = 0;
  6046. c->Request.CDB[3] = 0;
  6047. c->Request.CDB[4] = 0;
  6048. c->Request.CDB[5] = 0;
  6049. c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
  6050. c->Request.CDB[7] = (size >> 16) & 0xFF;
6051. c->Request.CDB[8] = (size >> 8) & 0xFF;
  6052. c->Request.CDB[9] = 0;
  6053. break;
  6054. default:
6055. dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
  6056. BUG();
  6057. }
  6058. } else if (cmd_type == TYPE_MSG) {
  6059. switch (cmd) {
  6060. case HPSA_PHYS_TARGET_RESET:
  6061. c->Request.CDBLen = 16;
  6062. c->Request.type_attr_dir =
  6063. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
  6064. c->Request.Timeout = 0; /* Don't time out */
  6065. memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
  6066. c->Request.CDB[0] = HPSA_RESET;
  6067. c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
  6068. /* Physical target reset needs no control bytes 4-7*/
  6069. c->Request.CDB[4] = 0x00;
  6070. c->Request.CDB[5] = 0x00;
  6071. c->Request.CDB[6] = 0x00;
  6072. c->Request.CDB[7] = 0x00;
  6073. break;
  6074. case HPSA_DEVICE_RESET_MSG:
  6075. c->Request.CDBLen = 16;
  6076. c->Request.type_attr_dir =
  6077. TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
  6078. c->Request.Timeout = 0; /* Don't time out */
  6079. memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
  6080. c->Request.CDB[0] = cmd;
  6081. c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
  6082. /* If bytes 4-7 are zero, it means reset the */
  6083. /* LunID device */
  6084. c->Request.CDB[4] = 0x00;
  6085. c->Request.CDB[5] = 0x00;
  6086. c->Request.CDB[6] = 0x00;
  6087. c->Request.CDB[7] = 0x00;
  6088. break;
  6089. default:
  6090. dev_warn(&h->pdev->dev, "unknown message type %d\n",
  6091. cmd);
  6092. BUG();
  6093. }
  6094. } else {
  6095. dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
  6096. BUG();
  6097. }
  6098. switch (GET_DIR(c->Request.type_attr_dir)) {
  6099. case XFER_READ:
  6100. pci_dir = PCI_DMA_FROMDEVICE;
  6101. break;
  6102. case XFER_WRITE:
  6103. pci_dir = PCI_DMA_TODEVICE;
  6104. break;
  6105. case XFER_NONE:
  6106. pci_dir = PCI_DMA_NONE;
  6107. break;
  6108. default:
  6109. pci_dir = PCI_DMA_BIDIRECTIONAL;
  6110. }
  6111. if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
  6112. return -1;
  6113. return 0;
  6114. }
  6115. /*
  6116. * Map (physical) PCI mem into (virtual) kernel space
  6117. */
  6118. static void __iomem *remap_pci_mem(ulong base, ulong size)
  6119. {
  6120. ulong page_base = ((ulong) base) & PAGE_MASK;
  6121. ulong page_offs = ((ulong) base) - page_base;
  6122. void __iomem *page_remapped = ioremap_nocache(page_base,
  6123. page_offs + size);
  6124. return page_remapped ? (page_remapped + page_offs) : NULL;
  6125. }
  6126. static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
  6127. {
  6128. return h->access.command_completed(h, q);
  6129. }
  6130. static inline bool interrupt_pending(struct ctlr_info *h)
  6131. {
  6132. return h->access.intr_pending(h);
  6133. }
  6134. static inline long interrupt_not_for_us(struct ctlr_info *h)
  6135. {
  6136. return (h->access.intr_pending(h) == 0) ||
  6137. (h->interrupts_enabled == 0);
  6138. }
  6139. static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
  6140. u32 raw_tag)
  6141. {
  6142. if (unlikely(tag_index >= h->nr_cmds)) {
  6143. dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
  6144. return 1;
  6145. }
  6146. return 0;
  6147. }
  6148. static inline void finish_cmd(struct CommandList *c)
  6149. {
  6150. dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
  6151. if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
  6152. || c->cmd_type == CMD_IOACCEL2))
  6153. complete_scsi_command(c);
  6154. else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
  6155. complete(c->waiting);
  6156. }
  6157. /* process completion of an indexed ("direct lookup") command */
  6158. static inline void process_indexed_cmd(struct ctlr_info *h,
  6159. u32 raw_tag)
  6160. {
  6161. u32 tag_index;
  6162. struct CommandList *c;
  6163. tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
  6164. if (!bad_tag(h, tag_index, raw_tag)) {
  6165. c = h->cmd_pool + tag_index;
  6166. finish_cmd(c);
  6167. }
  6168. }
6169. /* Some controllers, like the P400, will give us one interrupt
  6170. * after a soft reset, even if we turned interrupts off.
  6171. * Only need to check for this in the hpsa_xxx_discard_completions
  6172. * functions.
  6173. */
  6174. static int ignore_bogus_interrupt(struct ctlr_info *h)
  6175. {
  6176. if (likely(!reset_devices))
  6177. return 0;
  6178. if (likely(h->interrupts_enabled))
  6179. return 0;
  6180. dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
  6181. "(known firmware bug.) Ignoring.\n");
  6182. return 1;
  6183. }
  6184. /*
  6185. * Convert &h->q[x] (passed to interrupt handlers) back to h.
6186. * Relies on (h->q[x] == x) being true for x such that
  6187. * 0 <= x < MAX_REPLY_QUEUES.
  6188. */
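/*
 * For example, if queue points at h->q[2], then *queue == 2, so
 * (queue - *queue) lands on &h->q[0], from which container_of()
 * recovers h.
 */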
  6189. static struct ctlr_info *queue_to_hba(u8 *queue)
  6190. {
  6191. return container_of((queue - *queue), struct ctlr_info, q[0]);
  6192. }
  6193. static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
  6194. {
  6195. struct ctlr_info *h = queue_to_hba(queue);
  6196. u8 q = *(u8 *) queue;
  6197. u32 raw_tag;
  6198. if (ignore_bogus_interrupt(h))
  6199. return IRQ_NONE;
  6200. if (interrupt_not_for_us(h))
  6201. return IRQ_NONE;
  6202. h->last_intr_timestamp = get_jiffies_64();
  6203. while (interrupt_pending(h)) {
  6204. raw_tag = get_next_completion(h, q);
  6205. while (raw_tag != FIFO_EMPTY)
  6206. raw_tag = next_command(h, q);
  6207. }
  6208. return IRQ_HANDLED;
  6209. }
  6210. static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
  6211. {
  6212. struct ctlr_info *h = queue_to_hba(queue);
  6213. u32 raw_tag;
  6214. u8 q = *(u8 *) queue;
  6215. if (ignore_bogus_interrupt(h))
  6216. return IRQ_NONE;
  6217. h->last_intr_timestamp = get_jiffies_64();
  6218. raw_tag = get_next_completion(h, q);
  6219. while (raw_tag != FIFO_EMPTY)
  6220. raw_tag = next_command(h, q);
  6221. return IRQ_HANDLED;
  6222. }
  6223. static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
  6224. {
  6225. struct ctlr_info *h = queue_to_hba((u8 *) queue);
  6226. u32 raw_tag;
  6227. u8 q = *(u8 *) queue;
  6228. if (interrupt_not_for_us(h))
  6229. return IRQ_NONE;
  6230. h->last_intr_timestamp = get_jiffies_64();
  6231. while (interrupt_pending(h)) {
  6232. raw_tag = get_next_completion(h, q);
  6233. while (raw_tag != FIFO_EMPTY) {
  6234. process_indexed_cmd(h, raw_tag);
  6235. raw_tag = next_command(h, q);
  6236. }
  6237. }
  6238. return IRQ_HANDLED;
  6239. }
  6240. static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
  6241. {
  6242. struct ctlr_info *h = queue_to_hba(queue);
  6243. u32 raw_tag;
  6244. u8 q = *(u8 *) queue;
  6245. h->last_intr_timestamp = get_jiffies_64();
  6246. raw_tag = get_next_completion(h, q);
  6247. while (raw_tag != FIFO_EMPTY) {
  6248. process_indexed_cmd(h, raw_tag);
  6249. raw_tag = next_command(h, q);
  6250. }
  6251. return IRQ_HANDLED;
  6252. }
  6253. /* Send a message CDB to the firmware. Careful, this only works
  6254. * in simple mode, not performant mode due to the tag lookup.
  6255. * We only ever use this immediately after a controller reset.
  6256. */
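/*
 * Mechanically: the command's 32-bit physical address is written to the
 * inbound request port, and completion is detected by polling the reply
 * port until a tag matching that address comes back (the bits covered by
 * HPSA_SIMPLE_ERROR_BITS are masked off; HPSA_ERROR_BIT reports failure).
 */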
  6257. static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
  6258. unsigned char type)
  6259. {
  6260. struct Command {
  6261. struct CommandListHeader CommandHeader;
  6262. struct RequestBlock Request;
  6263. struct ErrDescriptor ErrorDescriptor;
  6264. };
  6265. struct Command *cmd;
  6266. static const size_t cmd_sz = sizeof(*cmd) +
  6267. sizeof(cmd->ErrorDescriptor);
  6268. dma_addr_t paddr64;
  6269. __le32 paddr32;
  6270. u32 tag;
  6271. void __iomem *vaddr;
  6272. int i, err;
  6273. vaddr = pci_ioremap_bar(pdev, 0);
  6274. if (vaddr == NULL)
  6275. return -ENOMEM;
  6276. /* The Inbound Post Queue only accepts 32-bit physical addresses for the
  6277. * CCISS commands, so they must be allocated from the lower 4GiB of
  6278. * memory.
  6279. */
  6280. err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  6281. if (err) {
  6282. iounmap(vaddr);
  6283. return err;
  6284. }
  6285. cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
  6286. if (cmd == NULL) {
  6287. iounmap(vaddr);
  6288. return -ENOMEM;
  6289. }
  6290. /* This must fit, because of the 32-bit consistent DMA mask. Also,
  6291. * although there's no guarantee, we assume that the address is at
  6292. * least 4-byte aligned (most likely, it's page-aligned).
  6293. */
  6294. paddr32 = cpu_to_le32(paddr64);
  6295. cmd->CommandHeader.ReplyQueue = 0;
  6296. cmd->CommandHeader.SGList = 0;
  6297. cmd->CommandHeader.SGTotal = cpu_to_le16(0);
  6298. cmd->CommandHeader.tag = cpu_to_le64(paddr64);
  6299. memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
  6300. cmd->Request.CDBLen = 16;
  6301. cmd->Request.type_attr_dir =
  6302. TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
  6303. cmd->Request.Timeout = 0; /* Don't time out */
  6304. cmd->Request.CDB[0] = opcode;
  6305. cmd->Request.CDB[1] = type;
  6306. memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
  6307. cmd->ErrorDescriptor.Addr =
  6308. cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
  6309. cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
  6310. writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
  6311. for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
  6312. tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
  6313. if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
  6314. break;
  6315. msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
  6316. }
  6317. iounmap(vaddr);
  6318. /* we leak the DMA buffer here ... no choice since the controller could
  6319. * still complete the command.
  6320. */
  6321. if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
  6322. dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
  6323. opcode, type);
  6324. return -ETIMEDOUT;
  6325. }
  6326. pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
  6327. if (tag & HPSA_ERROR_BIT) {
  6328. dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
  6329. opcode, type);
  6330. return -EIO;
  6331. }
  6332. dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
  6333. opcode, type);
  6334. return 0;
  6335. }
  6336. #define hpsa_noop(p) hpsa_message(p, 3, 0)
  6337. static int hpsa_controller_hard_reset(struct pci_dev *pdev,
  6338. void __iomem *vaddr, u32 use_doorbell)
  6339. {
  6340. if (use_doorbell) {
  6341. /* For everything after the P600, the PCI power state method
  6342. * of resetting the controller doesn't work, so we have this
  6343. * other way using the doorbell register.
  6344. */
  6345. dev_info(&pdev->dev, "using doorbell to reset controller\n");
  6346. writel(use_doorbell, vaddr + SA5_DOORBELL);
  6347. /* PMC hardware guys tell us we need a 10 second delay after
  6348. * doorbell reset and before any attempt to talk to the board
  6349. * at all to ensure that this actually works and doesn't fall
  6350. * over in some weird corner cases.
  6351. */
  6352. msleep(10000);
  6353. } else { /* Try to do it the PCI power state way */
  6354. /* Quoting from the Open CISS Specification: "The Power
  6355. * Management Control/Status Register (CSR) controls the power
  6356. * state of the device. The normal operating state is D0,
  6357. * CSR=00h. The software off state is D3, CSR=03h. To reset
  6358. * the controller, place the interface device in D3 then to D0,
  6359. * this causes a secondary PCI reset which will reset the
  6360. * controller." */
  6361. int rc = 0;
  6362. dev_info(&pdev->dev, "using PCI PM to reset controller\n");
  6363. /* enter the D3hot power management state */
  6364. rc = pci_set_power_state(pdev, PCI_D3hot);
  6365. if (rc)
  6366. return rc;
  6367. msleep(500);
  6368. /* enter the D0 power management state */
  6369. rc = pci_set_power_state(pdev, PCI_D0);
  6370. if (rc)
  6371. return rc;
  6372. /*
  6373. * The P600 requires a small delay when changing states.
  6374. * Otherwise we may think the board did not reset and we bail.
  6375. * This for kdump only and is particular to the P600.
  6376. */
  6377. msleep(500);
  6378. }
  6379. return 0;
  6380. }
  6381. static void init_driver_version(char *driver_version, int len)
  6382. {
  6383. memset(driver_version, 0, len);
  6384. strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
  6385. }
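/*
 * Reset-detection handshake: a known driver version string is written into
 * the config table before a reset; those bytes are expected to change
 * across a successful reset, so controller_reset_failed() can compare what
 * it reads back against the string that was written.
 */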
  6386. static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
  6387. {
  6388. char *driver_version;
  6389. int i, size = sizeof(cfgtable->driver_version);
  6390. driver_version = kmalloc(size, GFP_KERNEL);
  6391. if (!driver_version)
  6392. return -ENOMEM;
  6393. init_driver_version(driver_version, size);
  6394. for (i = 0; i < size; i++)
  6395. writeb(driver_version[i], &cfgtable->driver_version[i]);
  6396. kfree(driver_version);
  6397. return 0;
  6398. }
  6399. static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
  6400. unsigned char *driver_ver)
  6401. {
  6402. int i;
  6403. for (i = 0; i < sizeof(cfgtable->driver_version); i++)
  6404. driver_ver[i] = readb(&cfgtable->driver_version[i]);
  6405. }
  6406. static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
  6407. {
  6408. char *driver_ver, *old_driver_ver;
  6409. int rc, size = sizeof(cfgtable->driver_version);
  6410. old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
  6411. if (!old_driver_ver)
  6412. return -ENOMEM;
  6413. driver_ver = old_driver_ver + size;
  6414. /* After a reset, the 32 bytes of "driver version" in the cfgtable
  6415. * should have been changed, otherwise we know the reset failed.
  6416. */
  6417. init_driver_version(old_driver_ver, size);
  6418. read_driver_ver_from_cfgtable(cfgtable, driver_ver);
  6419. rc = !memcmp(driver_ver, old_driver_ver, size);
  6420. kfree(old_driver_ver);
  6421. return rc;
  6422. }
  6423. /* This does a hard reset of the controller using PCI power management
6424. * states or the doorbell register.
  6425. */
  6426. static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
  6427. {
  6428. u64 cfg_offset;
  6429. u32 cfg_base_addr;
  6430. u64 cfg_base_addr_index;
  6431. void __iomem *vaddr;
  6432. unsigned long paddr;
  6433. u32 misc_fw_support;
  6434. int rc;
  6435. struct CfgTable __iomem *cfgtable;
  6436. u32 use_doorbell;
  6437. u16 command_register;
  6438. /* For controllers as old as the P600, this is very nearly
  6439. * the same thing as
  6440. *
  6441. * pci_save_state(pci_dev);
  6442. * pci_set_power_state(pci_dev, PCI_D3hot);
  6443. * pci_set_power_state(pci_dev, PCI_D0);
  6444. * pci_restore_state(pci_dev);
  6445. *
  6446. * For controllers newer than the P600, the pci power state
  6447. * method of resetting doesn't work so we have another way
  6448. * using the doorbell register.
  6449. */
  6450. if (!ctlr_is_resettable(board_id)) {
  6451. dev_warn(&pdev->dev, "Controller not resettable\n");
  6452. return -ENODEV;
  6453. }
  6454. /* if controller is soft- but not hard resettable... */
  6455. if (!ctlr_is_hard_resettable(board_id))
  6456. return -ENOTSUPP; /* try soft reset later. */
  6457. /* Save the PCI command register */
  6458. pci_read_config_word(pdev, 4, &command_register);
  6459. pci_save_state(pdev);
  6460. /* find the first memory BAR, so we can find the cfg table */
  6461. rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
  6462. if (rc)
  6463. return rc;
  6464. vaddr = remap_pci_mem(paddr, 0x250);
  6465. if (!vaddr)
  6466. return -ENOMEM;
  6467. /* find cfgtable in order to check if reset via doorbell is supported */
  6468. rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
  6469. &cfg_base_addr_index, &cfg_offset);
  6470. if (rc)
  6471. goto unmap_vaddr;
  6472. cfgtable = remap_pci_mem(pci_resource_start(pdev,
  6473. cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
  6474. if (!cfgtable) {
  6475. rc = -ENOMEM;
  6476. goto unmap_vaddr;
  6477. }
  6478. rc = write_driver_ver_to_cfgtable(cfgtable);
  6479. if (rc)
  6480. goto unmap_cfgtable;
  6481. /* If reset via doorbell register is supported, use that.
  6482. * There are two such methods. Favor the newest method.
  6483. */
  6484. misc_fw_support = readl(&cfgtable->misc_fw_support);
  6485. use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
  6486. if (use_doorbell) {
  6487. use_doorbell = DOORBELL_CTLR_RESET2;
  6488. } else {
  6489. use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
  6490. if (use_doorbell) {
  6491. dev_warn(&pdev->dev,
  6492. "Soft reset not supported. Firmware update is required.\n");
  6493. rc = -ENOTSUPP; /* try soft reset */
  6494. goto unmap_cfgtable;
  6495. }
  6496. }
  6497. rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
  6498. if (rc)
  6499. goto unmap_cfgtable;
  6500. pci_restore_state(pdev);
  6501. pci_write_config_word(pdev, 4, command_register);
  6502. /* Some devices (notably the HP Smart Array 5i Controller)
  6503. need a little pause here */
  6504. msleep(HPSA_POST_RESET_PAUSE_MSECS);
  6505. rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
  6506. if (rc) {
  6507. dev_warn(&pdev->dev,
  6508. "Failed waiting for board to become ready after hard reset\n");
  6509. goto unmap_cfgtable;
  6510. }
  6511. rc = controller_reset_failed(vaddr);
  6512. if (rc < 0)
  6513. goto unmap_cfgtable;
  6514. if (rc) {
  6515. dev_warn(&pdev->dev, "Unable to successfully reset "
  6516. "controller. Will try soft reset.\n");
  6517. rc = -ENOTSUPP;
  6518. } else {
  6519. dev_info(&pdev->dev, "board ready after hard reset.\n");
  6520. }
  6521. unmap_cfgtable:
  6522. iounmap(cfgtable);
  6523. unmap_vaddr:
  6524. iounmap(vaddr);
  6525. return rc;
  6526. }
  6527. /*
6528. * We cannot read the structure directly; for portability we must use
  6529. * the io functions.
  6530. * This is for debug only.
  6531. */
  6532. static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
  6533. {
  6534. #ifdef HPSA_DEBUG
  6535. int i;
  6536. char temp_name[17];
  6537. dev_info(dev, "Controller Configuration information\n");
  6538. dev_info(dev, "------------------------------------\n");
  6539. for (i = 0; i < 4; i++)
  6540. temp_name[i] = readb(&(tb->Signature[i]));
  6541. temp_name[4] = '\0';
  6542. dev_info(dev, " Signature = %s\n", temp_name);
  6543. dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
  6544. dev_info(dev, " Transport methods supported = 0x%x\n",
  6545. readl(&(tb->TransportSupport)));
  6546. dev_info(dev, " Transport methods active = 0x%x\n",
  6547. readl(&(tb->TransportActive)));
  6548. dev_info(dev, " Requested transport Method = 0x%x\n",
  6549. readl(&(tb->HostWrite.TransportRequest)));
  6550. dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
  6551. readl(&(tb->HostWrite.CoalIntDelay)));
  6552. dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
  6553. readl(&(tb->HostWrite.CoalIntCount)));
  6554. dev_info(dev, " Max outstanding commands = %d\n",
  6555. readl(&(tb->CmdsOutMax)));
  6556. dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
  6557. for (i = 0; i < 16; i++)
  6558. temp_name[i] = readb(&(tb->ServerName[i]));
  6559. temp_name[16] = '\0';
  6560. dev_info(dev, " Server Name = %s\n", temp_name);
  6561. dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
  6562. readl(&(tb->HeartBeat)));
  6563. #endif /* HPSA_DEBUG */
  6564. }

static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
					"base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
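
/* Undo what hpsa_interrupt_mode() set up: release the MSI-X/MSI/INTx
 * vectors allocated for this controller and clear the MSI-X vector count.
 */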
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
	pci_free_irq_vectors(h->pdev);
	h->msix_vectors = 0;
}
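
/* Build the CPU -> reply queue lookup table from the irq affinity masks
 * assigned via PCI_IRQ_AFFINITY.  If any vector has no affinity mask,
 * fall back to routing every CPU to reply queue 0.
 */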
static void hpsa_setup_reply_map(struct ctlr_info *h)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < h->msix_vectors; queue++) {
		mask = pci_irq_get_affinity(h->pdev, queue);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			h->reply_map[cpu] = queue;
	}
	return;

fallback:
	for_each_possible_cpu(cpu)
		h->reply_map[cpu] = 0;
}

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use legacy INTx mode.
 */
static int hpsa_interrupt_mode(struct ctlr_info *h)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	/* Some boards advertise MSI but don't really support it */
	switch (h->board_id) {
	case 0x40700E11:
	case 0x40800E11:
	case 0x40820E11:
	case 0x40830E11:
		break;
	default:
		ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (ret > 0) {
			h->msix_vectors = ret;
			return 0;
		}

		flags |= PCI_IRQ_MSI;
		break;
	}

	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
	if (ret < 0)
		return ret;
	return 0;
}
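
/* Derive the 32-bit board ID from the PCI subsystem IDs and look it up
 * in the products[] table.  Boards served by the SA5A/SA5B access
 * methods are flagged as legacy, and an unrecognized ID falls through
 * to the last ("generic unknown smart array") table entry.
 */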
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
				bool *legacy_board)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	if (legacy_board)
		*legacy_board = false;
	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id) {
			if (products[i].access != &SA5A_access &&
			    products[i].access != &SA5B_access)
				return i;
			dev_warn(&pdev->dev,
				"legacy board ID: 0x%08x\n",
				*board_id);
			if (legacy_board)
				*legacy_board = true;
			return i;
		}

	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
	if (legacy_board)
		*legacy_board = true;
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}

static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}
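
/* Poll the SA5 scratchpad register until the firmware reaches the
 * desired state (BOARD_READY or BOARD_NOT_READY), sleeping between
 * polls.  Gives up with -ENODEV after a bounded number of iterations.
 */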
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}
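
/* Read the config table's BAR number and offset out of the controller
 * registers, then translate the BAR number into a PCI resource index
 * with find_PCI_BAR_index().
 */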
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static void hpsa_free_cfgtables(struct ctlr_info *h)
{
	if (h->transtable) {
		iounmap(h->transtable);
		h->transtable = NULL;
	}
	if (h->cfgtable) {
		iounmap(h->cfgtable);
		h->cfgtable = NULL;
	}
}

/* Find and map CISS config table and transfer table.
 * Several items must be unmapped (freed) later.
 */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable) {
		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
		return -ENOMEM;
	}
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index) + cfg_offset + trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable) {
		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
		hpsa_free_cfgtables(h);
		return -ENOMEM;
	}
	return 0;
}
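
/* Read the controller's advertised performant-mode command limit and
 * clamp it: down to 32 under kdump (reset_devices) to save memory, and
 * up to MIN_MAX_COMMANDS if the firmware reports an implausibly small
 * value.
 */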
static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
#define MIN_MAX_COMMANDS 16
	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);

	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < MIN_MAX_COMMANDS) {
		dev_warn(&h->pdev->dev,
			"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
			h->max_commands,
			MIN_MAX_COMMANDS);
		h->max_commands = MIN_MAX_COMMANDS;
	}
}

/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands;
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able memory. */
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		/*
		 * Original smart arrays supported at most 31 s/g entries
		 * embedded inline in the command (trying to use more
		 * would lock up the controller)
		 */
		h->max_cmd_sg_entries = 31;
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache them */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
}

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
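
/* After we ring the "clear event notify" doorbell, the controller
 * acknowledges by clearing DOORBELL_CLEAR_EVENTS (bit 6).  Poll for
 * that acknowledgment, or give up with -ENODEV.
 */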
static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			goto done;
		/* delay and try again */
		msleep(CLEAR_EVENT_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}

static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (h->remove_in_progress)
			goto done;
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			goto done;
		/* delay and try again */
		msleep(MODE_CHANGE_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}

/* return -ENODEV or other reason on error, 0 on success */
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h))
		goto error;
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
	return -ENODEV;
}

/* free items allocated or mapped by hpsa_pci_init */
static void hpsa_free_pci_init(struct ctlr_info *h)
{
	hpsa_free_cfgtables(h);			/* pci_init 4 */
	iounmap(h->vaddr);			/* pci_init 3 */
	h->vaddr = NULL;
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);		/* pci_init 1 */
	pci_release_regions(h->pdev);		/* pci_init 2 */
}

/* several items must be freed later */
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;
	bool legacy_board;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
	if (prod_index < 0)
		return prod_index;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);
	h->legacy_board = legacy_board;
	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
		pci_disable_device(h->pdev);
		return err;
	}

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"failed to obtain PCI resources\n");
		pci_disable_device(h->pdev);
		return err;
	}

	pci_set_master(h->pdev);

	err = hpsa_interrupt_mode(h);
	if (err)
		goto clean1;

	/* setup mapping between CPU and reply queue */
	hpsa_setup_reply_map(h);

	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto clean2;	/* intmode+region, pci */
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
		err = -ENOMEM;
		goto clean2;	/* intmode+region, pci */
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	err = hpsa_find_cfgtables(h);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	return 0;

clean4:	/* cfgtables, vaddr, intmode+region, pci */
	hpsa_free_cfgtables(h);
clean3:	/* vaddr, intmode+region, pci */
	iounmap(h->vaddr);
	h->vaddr = NULL;
clean2:	/* intmode+region, pci */
	hpsa_disable_interrupt_mode(h);
clean1:
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}

static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
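
/* kdump path only: bounce the PCI device, mask interrupts, hard reset
 * the controller, then poll it with no-ops until it responds.  Returns
 * 0 when the controller is usable again (or when reset_devices is not
 * set), negative errno otherwise.
 */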
static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state the
	 * PCI interface was left.  dev->enable_cnt is equal to zero,
	 * so we call enable+disable, wait a while, and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
				 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ?
				  "; re-trying" : ""));
	}

out_disable:
	pci_disable_device(pdev);
	return rc;
}

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	h->cmd_pool_bits = NULL;
	if (h->cmd_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct CommandList),
				h->cmd_pool,
				h->cmd_pool_dhandle);
		h->cmd_pool = NULL;
		h->cmd_pool_dhandle = 0;
	}
	if (h->errinfo_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct ErrorInfo),
				h->errinfo_pool,
				h->errinfo_pool_dhandle);
		h->errinfo_pool = NULL;
		h->errinfo_pool_dhandle = 0;
	}
}
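
/* Allocate the DMA-coherent command and error-info pools plus the
 * allocation bitmap (one bit per command slot).  On any failure,
 * hpsa_free_cmd_pool() safely frees whichever pieces were allocated.
 */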
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
				   sizeof(unsigned long),
				   GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	hpsa_preinitialize_commands(h);
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}

/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
		h->q[h->intr_mode] = 0;
		return;
	}

	for (i = 0; i < h->msix_vectors; i++) {
		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
		h->q[i] = 0;
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}

/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vectors; i++) {
			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
					0, h->intrname[i],
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
					pci_irq_vector(h->pdev, i), h->devname);
				for (j = 0; j < i; j++) {
					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
	} else {
		/* Use single reply pool */
		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
			sprintf(h->intrname[0], "%s-msi%s", h->devname,
				h->msix_vectors ? "x" : "");
			rc = request_irq(pci_irq_vector(h->pdev, 0),
				msixhandler, 0,
				h->intrname[0],
				&h->q[h->intr_mode]);
		} else {
			sprintf(h->intrname[h->intr_mode],
				"%s-intx", h->devname);
			rc = request_irq(pci_irq_vector(h->pdev, 0),
				intxhandler, IRQF_SHARED,
				h->intrname[0],
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
			pci_irq_vector(h->pdev, 0), h->devname);
		hpsa_free_irqs(h);
		return -ENODEV;
	}
	return 0;
}
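
/* Soft reset: send a controller reset CDB, then confirm the board
 * actually went away (BOARD_NOT_READY) before waiting for it to come
 * back (BOARD_READY).  Both transitions must be observed for the reset
 * to count as successful.
 */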
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	int rc;

	hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return rc;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
		return rc;
	}

	return 0;
}
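
/* Free every DMA-coherent reply queue that was actually allocated and
 * reset the bookkeeping so a later re-allocation starts clean.
 */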
static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		pci_free_consistent(h->pdev,
					h->reply_queue_size,
					h->reply_queue[i].head,
					h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
	h->reply_queue_size = 0;
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	hpsa_free_irqs(h);			/* init_one 4 */
	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */
	hpsa_free_pci_init(h);			/* init_one 2_5 */
	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);				/* init_one 1 */
}

/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;
	int failcount = 0;

	flush_workqueue(h->resubmit_wq);	/* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
			finish_cmd(c);
			atomic_dec(&h->commands_outstanding);
			failcount++;
		}
		cmd_free(h, c);
	}
	dev_warn(&h->pdev->dev,
		"failed %d commands in fail_all\n", failcount);
}

static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu) {
		u32 *lockup_detected;

		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
	if (lockup_detected == 0xffff0000) {
		dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
		writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
	}
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}
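
/* Lockup detection: the firmware increments a heartbeat counter in the
 * config table; if a full sample interval passes with no interrupt and
 * no counter movement, declare the controller locked up.  Returns true
 * only when a lockup was just detected and handled.
 */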
static int detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return true;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
	return false;
}

/*
 * Set ioaccel status for all ioaccel volumes.
 *
 * Called from monitor controller worker (hpsa_event_monitor_worker)
 *
 * A Volume (or Volumes that comprise an Array set) may be undergoing a
 * transformation, so we will be turning off ioaccel for all volumes that
 * make up the Array.
 */
static void hpsa_set_ioaccel_status(struct ctlr_info *h)
{
	int rc;
	int i;
	u8 ioaccel_status;
	unsigned char *buf;
	struct hpsa_scsi_dev_t *device;

	if (!h)
		return;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	/*
	 * Run through current device list used during I/O requests.
	 */
	for (i = 0; i < h->ndevices; i++) {
		int offload_to_be_enabled = 0;
		int offload_config = 0;

		device = h->dev[i];

		if (!device)
			continue;
		if (!hpsa_vpd_page_supported(h, device->scsi3addr,
					     HPSA_VPD_LV_IOACCEL_STATUS))
			continue;

		memset(buf, 0, 64);

		rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
				VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
				buf, 64);
		if (rc != 0)
			continue;

		ioaccel_status = buf[IOACCEL_STATUS_BYTE];

		/*
		 * Check whether offload is still configured.
		 */
		offload_config =
				!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
		/*
		 * If offload is configured, check to see if ioaccel
		 * needs to be enabled.
		 */
		if (offload_config)
			offload_to_be_enabled =
				!!(ioaccel_status & OFFLOAD_ENABLED_BIT);

		/*
		 * If ioaccel is to be re-enabled, re-enable later during the
		 * scan operation so the driver can get a fresh raidmap
		 * before turning ioaccel back on.
		 */
		if (offload_to_be_enabled)
			continue;

		/*
		 * Immediately turn off ioaccel for any volume the
		 * controller tells us to.  Some of the reasons could be:
		 *    transformation - change to the LVs of an Array.
		 *    degraded volume - component failure
		 */
		hpsa_turn_off_ioaccel_for_device(device);
	}

	kfree(buf);
}

static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	char *event_type;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		hpsa_set_ioaccel_status(h);
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
	}
}

/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (h->drv_req_rescan) {
		h->drv_req_rescan = 0;
		return 1;
	}

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}

/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}

static int hpsa_luns_changed(struct ctlr_info *h)
{
	int rc = 1; /* assume there are changes */
	struct ReportLUNdata *logdev = NULL;

	/* if we can't find out if lun data has changed,
	 * assume that it has.
	 */
	if (!h->lastlogicals)
		return rc;

	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
	if (!logdev)
		return rc;

	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
		dev_warn(&h->pdev->dev,
			"report luns failed, can't track lun changes.\n");
		goto out;
	}
	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
		dev_info(&h->pdev->dev,
			"Lun changes detected.\n");
		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
		goto out;
	} else
		rc = 0; /* no changes detected. */
out:
	kfree(logdev);
	return rc;
}
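
/* Kick off a SCSI host rescan unless a reset is in flight; in that
 * case just set drv_req_rescan so the rescan happens once the reset
 * completes.  Taking a Scsi_Host reference keeps the host alive for
 * the duration of hpsa_scan_start().
 */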
static void hpsa_perform_rescan(struct ctlr_info *h)
{
	struct Scsi_Host *sh = NULL;
	unsigned long flags;

	/*
	 * Do the scan after the reset
	 */
	spin_lock_irqsave(&h->reset_lock, flags);
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);

	sh = scsi_host_get(h->scsi_host);
	if (sh != NULL) {
		hpsa_scan_start(sh);
		scsi_host_put(sh);
		h->drv_req_rescan = 0;
	}
}

/*
 * watch for controller events
 */
static void hpsa_event_monitor_worker(struct work_struct *work)
{
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, event_monitor_work);
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	if (hpsa_ctlr_needs_rescan(h)) {
		hpsa_ack_ctlr_events(h);
		hpsa_perform_rescan(h);
	}

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		schedule_delayed_work(&h->event_monitor_work,
					HPSA_EVENT_MONITOR_INTERVAL);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void hpsa_rescan_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, rescan_ctlr_work);

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
		hpsa_perform_rescan(h);
	} else if (h->discovery_polling) {
		if (hpsa_luns_changed(h)) {
			dev_info(&h->pdev->dev,
				"driver discovery polling rescan.\n");
			hpsa_perform_rescan(h);
		}
	}

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;

	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return wq;
}

static void hpsa_free_ctlr_info(struct ctlr_info *h)
{
	kfree(h->reply_map);
	kfree(h);
}

static struct ctlr_info *hpsa_alloc_ctlr_info(void)
{
	struct ctlr_info *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
	if (!h->reply_map) {
		kfree(h);
		return NULL;
	}
	return h;
}

static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = hpsa_alloc_ctlr_info();
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	spin_lock_init(&h->reset_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1; /* no scan currently in progress */
	h->scan_waiting = 0;

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6;	/* sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the resubmit workqueue */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean7;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;
	/* Disable discovery polling. */
	h->discovery_polling = 0;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);

	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
	if (!h->lastlogicals)
		dev_info(&h->pdev->dev,
			"Can't track change to report lun data\n");

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean8;	/* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
	schedule_delayed_work(&h->event_monitor_work,
				HPSA_EVENT_MONITOR_INTERVAL);
	return 0;

clean8:	/* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
	kfree(h->lastlogicals);
clean7:	/* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6:	/* sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5:	/* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4:	/* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3:	/* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5:	/* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2:	/* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
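	/*
	 * Note: the "out:" label below is deliberately placed between the
	 * if-condition and its statement, so both the error gotos above and
	 * a nonzero CommandStatus land on the same dev_warn().
	 */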
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
	kfree(flush_buf);
}

/* Make controller gather fresh report lun data each time we
 * send down a report luns request
 */
static void hpsa_disable_rld_caching(struct ctlr_info *h)
{
	u32 *options;
	struct CommandList *c;
	int rc;

	/* Don't bother trying to set diag options if locked up */
	if (unlikely(lockup_detected(h)))
		return;

	options = kzalloc(sizeof(*options), GFP_KERNEL);
	if (!options)
		return;

	c = cmd_alloc(h);

	/* first, get the current diag options settings */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now, set the bit for disabling the RLD caching */
	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;

	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_TODEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now verify that it got set: */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
		goto out;

errout:
	dev_err(&h->pdev->dev,
			"Error: failed to disable report lun data caching.\n");
out:
	cmd_free(h, c);
	kfree(options);
}
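
/* Common shutdown work shared by the shutdown and remove paths: flush
 * the write cache to disk, then quiesce and release the interrupts.
 */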
static void __hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupts and send the flush,
	 * to write all data in the battery backed cache to disks.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	__hpsa_shutdown(pdev);
	pci_disable_device(pdev);
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	cancel_delayed_work_sync(&h->event_monitor_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);

	hpsa_delete_sas_host(h);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	if (h->scsi_host)
		scsi_remove_host(h->scsi_host);		/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	__hpsa_shutdown(pdev);

	hpsa_free_device_info(h);		/* scan */

	kfree(h->hba_inquiry_data);		/* init_one 10 */
	h->hba_inquiry_data = NULL;		/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	kfree(h->lastlogicals);

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);			/* init_one 2.5 */

	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */

	hpsa_free_ctlr_info(h);			/* init_one 1 */
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
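/* Worked example, using the bft[] values that
 * hpsa_enter_performant_mode() below passes in with min_blocks = 4:
 * a command with 1 SG entry needs 1 + 4 = 5 blocks and maps to
 * bucket 0 (size 5); a command with 5 SG entries needs 9 blocks and
 * maps to bucket 3 (size 10), the first bucket big enough to fit it.
 */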
  8039. static void calc_bucket_map(int bucket[], int num_buckets,
  8040. int nsgs, int min_blocks, u32 *bucket_map)
  8041. {
  8042. int i, j, b, size;
  8043. /* Note, bucket_map must have nsgs+1 entries. */
  8044. for (i = 0; i <= nsgs; i++) {
  8045. /* Compute size of a command with i SG entries */
  8046. size = i + min_blocks;
  8047. b = num_buckets; /* Assume the biggest bucket */
  8048. /* Find the bucket that is just big enough */
  8049. for (j = 0; j < num_buckets; j++) {
  8050. if (bucket[j] >= size) {
  8051. b = j;
  8052. break;
  8053. }
  8054. }
  8055. /* for a command with i SG entries, use bucket b. */
  8056. bucket_map[i] = b;
  8057. }
  8058. }
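
/*
 * Worked example (illustrative only; SG_ENTRIES_IN_CMD is 32 per
 * hpsa_cmd.h, so the performant-mode caller below passes min_blocks = 4
 * and bft[7] == 36):
 *
 *	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 36};
 *	u32 map[33];
 *
 *	calc_bucket_map(bft, 8, 32, 4, map);
 *	// map[3] == 2: 3 SG entries need 3 + 4 = 7 blocks, and
 *	//              bft[2] == 8 is the smallest bucket that fits.
 *	// map[30] == 7: 34 blocks fit only the largest bucket, bft[7].
 */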

/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to in order to tell it the 8
	 * different command sizes we may send.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);
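
	/*
	 * For reference, the submit path consumes this table via
	 * set_performant_mode() earlier in this file, roughly:
	 *
	 *	c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
	 *
	 * i.e. the bucket index rides in the low bits of the command
	 * address, and the controller fetches bft[index] * 16 bytes.
	 */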

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else if (trans_support & CFGTBL_Trans_io_accel2)
		access = SA5_ioaccel_mode2_access;
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
					 &cfg_base_addr_index, &cfg_offset);
		if (rc)	/* was assigned but never checked; fail cleanly */
			return rc;
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		if (!h->ioaccel2_bft2_regs)	/* remap_pci_mem() can fail */
			return -ENOMEM;
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}
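
/*
 * Note that hpsa_enter_performant_mode() performs the doorbell handshake
 * twice: once to switch the transport into performant mode, and again to
 * commit the ioaccel block-fetch programming.  The pattern both times is:
 *
 *	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 *	if (hpsa_wait_for_mode_change_ack(h))	// poll for the ack
 *		return -ENODEV;
 */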

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}
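
/*
 * Why the alignment BUILD_BUG_ON above matters, concretely: with
 * IOACCEL1_COMMANDLIST_ALIGNMENT == 128, command i lives at
 *
 *	h->ioaccel_cmd_pool_dhandle + i * sizeof(struct io_accel1_cmd)
 *
 * whose low 7 bits are always zero, leaving them free for the hardware's
 * block-fetch encoding described in hpsa_enter_performant_mode().
 */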

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}

/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
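
/*
 * The clean1/clean2 labels above follow this file's usual unwind
 * convention: each label frees everything allocated at or after the step
 * it names, so a failure in hpsa_enter_performant_mode() falls through
 * clean2 (block fetch table) into clean1 (reply queues and both ioaccel
 * pools).
 */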

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
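
/*
 * The drain loop above piggybacks on the cmd_alloc()/cmd_free()
 * refcounting used throughout this file: a free slot sits at refcount 0,
 * so the probe
 *
 *	refcount = atomic_inc_return(&c->refcount);	// 1 if free, >1 if live
 *	...
 *	cmd_free(h, c);					// drop the probe's ref
 *
 * counts a command as outstanding only when the increment returns > 1.
 */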

static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	sas_phy_delete(phy);
	kfree(hpsa_sas_phy);
}

static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}

static int
hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}

static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}
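
/*
 * Ownership summary for the SAS transport objects managed above: the
 * controller owns one hpsa_sas_node, the node owns a list of
 * hpsa_sas_ports, and each port owns its hpsa_sas_phys.  Teardown walks
 * that tree top-down:
 *
 *	hpsa_free_sas_node(node)
 *	  -> hpsa_free_sas_port() for each port
 *	       -> hpsa_free_sas_phy() for each phy
 */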

static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}

static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_dev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	struct Scsi_Host *shost = phy_to_shost(rphy);
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *sd;

	if (!shost)
		return -ENXIO;

	h = shost_to_hba(shost);

	if (!h)
		return -ENXIO;

	sd = hpsa_find_device_by_sas_rphy(h, rphy);
	if (!sd)
		return -ENXIO;

	*identifier = sd->eli;

	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
};
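
/*
 * Several of the callbacks above are deliberate no-op stubs: the SAS
 * transport class expects a populated sas_function_template, but this
 * controller only has real data to report for the enclosure identifier,
 * so the phy management entry points simply report success or
 * -EINVAL/-ENXIO.
 */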

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one of
 * our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);

#undef VERIFY_OFFSET
}
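
/*
 * Because every check in verify_offsets() is a BUILD_BUG_ON(), the
 * function compiles to nothing; it exists purely so that, for example, a
 * padding change that moved io_accel2_cmd's sg member off offset 64 would
 * break the build instead of corrupting DMA at run time.
 */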

module_init(hpsa_init);
module_exit(hpsa_cleanup);